/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
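
/*
 * Context note, not from the original header: ksm_madvise() is the backend
 * for the MADV_MERGEABLE and MADV_UNMERGEABLE advice values. A minimal
 * userspace sketch of the per-region opt-in (assuming addr/len describe a
 * mapped, page-aligned anonymous region) would be:
 *
 *	#include <sys/mman.h>
 *
 *	if (madvise(addr, len, MADV_MERGEABLE))
 *		perror("madvise(MADV_MERGEABLE)");
 */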

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
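
/*
 * Context note, not from the original header: ksm_enable_merge_any() and
 * ksm_disable_merge_any() back the PR_SET_MEMORY_MERGE prctl, which opts an
 * entire process in or out of KSM instead of requiring madvise() per VMA;
 * ksm_add_vma() then tags VMAs created after the opt-in. A rough userspace
 * sketch:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
 *		perror("prctl(PR_SET_MEMORY_MERGE)");
 */
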
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern unsigned long ksm_zero_pages;

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;
		mm->ksm_zero_pages--;
	}
}

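/*
 * Assumed call-site sketch (the real one lives in mm/memory.c, not here):
 * when a PTE is torn down, the caller passes the old PTE value in so the
 * global and per-mm KSM zero-page counters stay balanced:
 *
 *	pte_t ptent = ptep_get_and_clear_full(mm, addr, pte, full);
 *	...
 *	ksm_might_unmap_zero_page(mm, ptent);
 */
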
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

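/*
 * Assumed mm-lifecycle hookup (a sketch; see kernel/fork.c): ksm_fork()
 * runs when an mm is duplicated and ksm_exit() when the mm is torn down,
 * so KSM tracking follows the address space it watches:
 *
 *	dup_mmap() -> ksm_fork(mm, oldmm)
 *	__mmput()  -> ksm_exit(mm)
 */
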
/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

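/*
 * Assumed usage in the swap-in fault path (a sketch, not a verbatim quote
 * of mm/memory.c): the fault handler substitutes the possible copy before
 * mapping the page into the faulting VMA, treating allocation failure as
 * an out-of-memory fault:
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;
 *		goto out;
 *	}
 */
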
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif

#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *);
#endif /* CONFIG_PROC_FS */

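/*
 * For context (restated from Documentation/admin-guide/mm/ksm.rst, not part
 * of the original header): the per-process profit reported here is roughly
 *
 *	profit = (ksm_merging_pages + ksm_zero_pages) * PAGE_SIZE
 *		 - ksm_rmap_items * sizeof(struct ksm_rmap_item)
 *
 * i.e. pages saved by merging minus the metadata KSM spends tracking them.
 */
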
#else /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */