/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma or the page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for cleaning up the
	 * anon_vma if it is the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 *
	 * This counter is used when deciding whether to reuse an anon_vma
	 * instead of forking a new one. See the comments in anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */

	/* Interval tree of private "related" vmas */
	struct rb_root_cached rb_root;
};
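
/*
 * Illustrative sketch (not part of the upstream header): an anonymous
 * page finds its anon_vma through page->mapping, with the low
 * PAGE_MAPPING_ANON bit set to distinguish it from a file page's
 * address_space.  Decoding it follows what mm/rmap.c does internally:
 *
 *	unsigned long mapping = (unsigned long)READ_ONCE(page->mapping);
 *	struct anon_vma *av = NULL;
 *
 *	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		av = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 *
 * The result is only stable while the page stays mapped or a reference
 * is held; use page_get_anon_vma() rather than open-coding this.
 */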

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes, in an interval tree, the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_lock & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
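
/*
 * Illustrative sketch (not upstream code): the chains can be walked in
 * both directions.  From a vma to its anon_vmas, walk the same_vma
 * list under mmap_lock:
 *
 *	struct anon_vma_chain *avc;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *		visit_anon_vma(avc->anon_vma);
 *
 * and from an anon_vma to the vmas mapping a page range, query the
 * interval tree under the anon_vma lock.  anon_vma_interval_tree_foreach()
 * is defined alongside the kernel's interval-tree helpers; the visit_*
 * callbacks and pgoff_start/pgoff_end are placeholders:
 *
 *	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 *				       pgoff_start, pgoff_end)
 *		visit_vma(avc->vma);
 */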

enum ttu_flags {
	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED		= 0x80,	/* do not grab rmap lock:
					 * caller holds it */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}
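
/*
 * Illustrative sketch (not upstream code): all locking goes through the
 * root of the anon_vma tree.  Writers hold the lock while modifying the
 * interval tree; rmap walkers take it for read:
 *
 *	anon_vma_lock_write(anon_vma);
 *	... link or unlink anon_vma_chains in anon_vma->rb_root ...
 *	anon_vma_unlock_write(anon_vma);
 *
 *	anon_vma_lock_read(anon_vma);
 *	... walk the interval tree to visit each mapping ...
 *	anon_vma_unlock_read(anon_vma);
 */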

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
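
/*
 * Illustrative sketch (not upstream code): a fault path instantiating a
 * new anonymous page sets up the anon_vma first, then adds the rmap.
 * Error handling and the surrounding fault plumbing are elided:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address, false);
 */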

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

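/*
 * Illustrative sketch (not upstream code): page_get_anon_vma() returns
 * the anon_vma with a reference held, or NULL if the page is no longer
 * anonymous or mapped.  Pair it with put_anon_vma(), as migration does:
 *
 *	struct anon_vma *anon_vma = page_get_anon_vma(page);
 *
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... walk the mappings ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}
 */
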
/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long address, bool compound);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long address, int flags);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long address, bool compound);
void page_add_file_rmap(struct page *, struct vm_area_struct *,
		bool compound);
void page_remove_rmap(struct page *, struct vm_area_struct *,
		bool compound);
void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long address);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long address);

static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

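/*
 * Illustrative sketch (not upstream code): fork duplicates an existing
 * mapping rather than adding a new one, so the pte-copying path takes a
 * page reference and bumps the mapcount via page_dup_rmap() (shown here
 * without the surrounding pte handling):
 *
 *	get_page(page);
 *	page_dup_rmap(page, false);
 */
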
/*
 * Called from mm/vmscan.c to handle paging out
 */
int folio_referenced(struct folio *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);

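/*
 * Illustrative sketch (not upstream code): reclaim uses the returned
 * count of referenced ptes plus the accumulated vm_flags to decide a
 * folio's fate.  memcg may be NULL; sc is a reclaim-control placeholder:
 *
 *	unsigned long vm_flags;
 *	int referenced;
 *
 *	referenced = folio_referenced(folio, 1, sc->target_mem_cgroup,
 *				      &vm_flags);
 *	if (vm_flags & VM_EXEC)
 *		... favour keeping executable pages resident ...
 */
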
void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);

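/*
 * Illustrative sketch (not upstream code): try_to_unmap() returns void;
 * callers combine ttu_flags and then check folio_mapped() to see
 * whether unmapping succeeded, roughly as reclaim does:
 *
 *	enum ttu_flags flags = TTU_BATCH_FLUSH;
 *
 *	if (folio_test_large(folio))
 *		flags |= TTU_SPLIT_HUGE_PMD;
 *	try_to_unmap(folio, flags);
 *	if (!folio_mapped(folio))
 *		... all mappings gone, proceed to reclaim the folio ...
 */
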
int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, struct page **pages,
				void *arg);

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION		(1 << 1)

struct page_vma_mapped_walk {
	unsigned long pfn;
	unsigned long nr_pages;
	pgoff_t pgoff;
	struct vm_area_struct *vma;
	unsigned long address;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	unsigned int flags;
};

#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags)	\
	struct page_vma_mapped_walk name = {				\
		.pfn = page_to_pfn(_page),				\
		.nr_pages = compound_nr(_page),				\
		.pgoff = page_to_pgoff(_page),				\
		.vma = _vma,						\
		.address = _address,					\
		.flags = _flags,					\
	}

#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)	\
	struct page_vma_mapped_walk name = {				\
		.pfn = folio_pfn(_folio),				\
		.nr_pages = folio_nr_pages(_folio),			\
		.pgoff = folio_pgoff(_folio),				\
		.vma = _vma,						\
		.address = _address,					\
		.flags = _flags,					\
	}

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	/*
	 * A HugeTLB pte points directly at the page table entry and was
	 * never mapped with pte_offset_map(), so there is nothing to
	 * pte_unmap().
	 */
	if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

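/*
 * Illustrative sketch (not upstream code): the usual walk pattern.
 * Each successful iteration leaves pvmw.pte (or pvmw.pmd for a huge
 * mapping) pointing at one mapping of the folio in this vma, with
 * pvmw.ptl held; the final failing call drops all locks itself, and
 * page_vma_mapped_walk_done() is only needed to break out early:
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		address = pvmw.address;
 *		if (pvmw.pte)
 *			... inspect or update *pvmw.pte ...
 *	}
 */
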
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int folio_mkclean(struct folio *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

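/*
 * Illustrative sketch (not upstream code): page_lock_anon_vma_read()
 * combines the anon_vma lookup with read-locking; on success the caller
 * must drop the lock with page_unlock_anon_vma_read():
 *
 *	struct anon_vma *av = page_lock_anon_vma_read(page);
 *
 *	if (av) {
 *		... walk av->rb_root to find the mapping vmas ...
 *		page_unlock_anon_vma_read(av);
 *	}
 */
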
/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking the traversal termination condition
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *            than the default
 * invalid_vma: for skipping vmas that are not of interest
 */
struct rmap_walk_control {
	void *arg;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
	 */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
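
/*
 * Illustrative sketch (not upstream code): a caller supplies rmap_one()
 * and walks every mapping of a page.  examine_one() and the cookie are
 * hypothetical placeholders:
 *
 *	static bool examine_one(struct page *page, struct vm_area_struct *vma,
 *				unsigned long addr, void *arg)
 *	{
 *		... act on this mapping; return false to stop the walk ...
 *		return true;
 *	}
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = examine_one,
 *		.arg = &cookie,
 *	};
 *	rmap_walk(page, &rwc);
 */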

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int folio_referenced(struct folio *folio, int is_locked,
				   struct mem_cgroup *memcg,
				   unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
}

static inline int folio_mkclean(struct folio *folio)
{
	return 0;
}
#endif	/* CONFIG_MMU */

static inline int page_mkclean(struct page *page)
{
	return folio_mkclean(page_folio(page));
}
#endif	/* _LINUX_RMAP_H */