/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
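
/*
 * Example (userspace, illustrative): an application opts an anonymous
 * region into KSM merging with madvise(2); the MADV_MERGEABLE request
 * is routed to ksm_madvise() above.
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 */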

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
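
/*
 * Example (userspace, illustrative): ksm_enable_merge_any() and
 * ksm_disable_merge_any() back the PR_SET_MEMORY_MERGE prctl(2), which
 * opts all compatible VMAs of the calling process into KSM:
 *
 *	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
 *		perror("prctl");
 */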

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
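
/*
 * Example (hypothetical caller, illustrative): a page-table walker can
 * use this to tell a KSM-placed zeropage (a dedup result) apart from an
 * ordinary mapping of the shared zeropage:
 *
 *	if (pte_present(pte) && is_ksm_zero_pte(pte))
 *		nr_ksm_zero++;
 */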

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

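/*
 * Note (informational): these global and per-mm counters are what the
 * KSM statistics report for deduplicated zeropages, e.g. the
 * ksm_zero_pages field in /proc/<pid>/ksm_stat.
 */
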
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}
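
/*
 * Note (informational): MMF_VM_MERGE_ANY is kept across execve(), so a
 * process running under PR_SET_MEMORY_MERGE gets its fresh mm entered
 * into KSM here on the exec path.
 */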

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
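
/*
 * Example (illustrative sketch of the swap-in pattern described above):
 * the caller must adopt whatever folio comes back, since a copy may have
 * been made, and must treat a NULL or error pointer return as failure:
 *
 *	folio = ksm_might_need_to_copy(folio, vma, addr);
 *	if (unlikely(!folio))
 *		goto out_oom;
 */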

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);

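/*
 * Note (informational): ksm_process_profit() backs the process_profit
 * field of /proc/<pid>/ksm_stat; per Documentation/admin-guide/mm/ksm.rst
 * the estimate is roughly
 *	merged pages * PAGE_SIZE - rmap items * sizeof(rmap_item)
 */
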
#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(struct folio *folio, struct page *page,
		struct list_head *to_kill, int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */