| 1 | /* |
| 2 | * mm/rmap.c - physical to virtual reverse mappings |
| 3 | * |
| 4 | * Copyright 2001, Rik van Riel <riel@conectiva.com.br> |
| 5 | * Released under the General Public License (GPL). |
| 6 | * |
| 7 | * Simple, low overhead reverse mapping scheme. |
| 8 | * Please try to keep this thing as modular as possible. |
| 9 | * |
| 10 | * Provides methods for unmapping each kind of mapped page: |
| 11 | * the anon methods track anonymous pages, and |
| 12 | * the file methods track pages belonging to an inode. |
| 13 | * |
| 14 | * Original design by Rik van Riel <riel@conectiva.com.br> 2001 |
| 15 | * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 |
| 16 | * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 |
| 17 | * Contributions by Hugh Dickins 2003, 2004 |
| 18 | */ |
| 19 | |
| 20 | /* |
| 21 | * Lock ordering in mm: |
| 22 | * |
| 23 | * inode->i_rwsem (while writing or truncating, not reading or faulting) |
| 24 | * mm->mmap_lock |
| 25 | * mapping->invalidate_lock (in filemap_fault) |
| 26 | * page->flags PG_locked (lock_page) * (see hugetlbfs below) |
| 27 | * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) |
| 28 | * mapping->i_mmap_rwsem |
| 29 | * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) |
| 30 | * anon_vma->rwsem |
| 31 | * mm->page_table_lock or pte_lock |
| 32 | * swap_lock (in swap_duplicate, swap_info_get) |
| 33 | * mmlist_lock (in mmput, drain_mmlist and others) |
| 34 | * mapping->private_lock (in block_dirty_folio) |
| 35 | * folio_lock_memcg move_lock (in block_dirty_folio) |
| 36 | * i_pages lock (widely used) |
| 37 | * lruvec->lru_lock (in folio_lruvec_lock_irq) |
| 38 | * inode->i_lock (in set_page_dirty's __mark_inode_dirty) |
| 39 | * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) |
| 40 | * sb_lock (within inode_lock in fs/fs-writeback.c) |
| 41 | * i_pages lock (widely used, in set_page_dirty, |
| 42 | * in arch-dependent flush_dcache_mmap_lock, |
| 43 | * within bdi.wb->list_lock in __sync_single_inode) |
| 44 | * |
| 45 | * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) |
| 46 | * ->tasklist_lock |
| 47 | * pte map lock |
| 48 | * |
| 49 | * * hugetlbfs PageHuge() pages take locks in this order: |
| 50 | * mapping->i_mmap_rwsem |
| 51 | * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) |
| 52 | * page->flags PG_locked (lock_page) |
| 53 | */ |
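|  | |
|  | /* |
|  | * Illustrative sketch (not a real call path) of how the ordering above is |
|  | * typically honoured by rmap users: the mmap and anon_vma locks are taken |
|  | * before the page table lock, never the other way around. |
|  | * |
|  | *	mmap_read_lock(mm);				// mm->mmap_lock |
|  | *	anon_vma_lock_read(vma->anon_vma);		// anon_vma->rwsem |
|  | *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);	// pte_lock |
|  | *	... inspect or modify the PTE ... |
|  | *	pte_unmap_unlock(pte, ptl); |
|  | *	anon_vma_unlock_read(vma->anon_vma); |
|  | *	mmap_read_unlock(mm); |
|  | */ |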
| 54 | |
| 55 | #include <linux/mm.h> |
| 56 | #include <linux/sched/mm.h> |
| 57 | #include <linux/sched/task.h> |
| 58 | #include <linux/pagemap.h> |
| 59 | #include <linux/swap.h> |
| 60 | #include <linux/swapops.h> |
| 61 | #include <linux/slab.h> |
| 62 | #include <linux/init.h> |
| 63 | #include <linux/ksm.h> |
| 64 | #include <linux/rmap.h> |
| 65 | #include <linux/rcupdate.h> |
| 66 | #include <linux/export.h> |
| 67 | #include <linux/memcontrol.h> |
| 68 | #include <linux/mmu_notifier.h> |
| 69 | #include <linux/migrate.h> |
| 70 | #include <linux/hugetlb.h> |
| 71 | #include <linux/huge_mm.h> |
| 72 | #include <linux/backing-dev.h> |
| 73 | #include <linux/page_idle.h> |
| 74 | #include <linux/memremap.h> |
| 75 | #include <linux/userfaultfd_k.h> |
| 76 | |
| 77 | #include <asm/tlbflush.h> |
| 78 | |
| 79 | #define CREATE_TRACE_POINTS |
| 80 | #include <trace/events/tlb.h> |
| 81 | #include <trace/events/migrate.h> |
| 82 | |
| 83 | #include "internal.h" |
| 84 | |
| 85 | static struct kmem_cache *anon_vma_cachep; |
| 86 | static struct kmem_cache *anon_vma_chain_cachep; |
| 87 | |
| 88 | static inline struct anon_vma *anon_vma_alloc(void) |
| 89 | { |
| 90 | struct anon_vma *anon_vma; |
| 91 | |
| 92 | anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); |
| 93 | if (anon_vma) { |
| 94 | atomic_set(&anon_vma->refcount, 1); |
| 95 | anon_vma->degree = 1; /* Reference for first vma */ |
| 96 | anon_vma->parent = anon_vma; |
| 97 | /* |
| 98 | * Initialise the anon_vma root to point to itself. If called |
| 99 | * from fork, the root will be reset to the parent's anon_vma. |
| 100 | */ |
| 101 | anon_vma->root = anon_vma; |
| 102 | } |
| 103 | |
| 104 | return anon_vma; |
| 105 | } |
| 106 | |
| 107 | static inline void anon_vma_free(struct anon_vma *anon_vma) |
| 108 | { |
| 109 | VM_BUG_ON(atomic_read(&anon_vma->refcount)); |
| 110 | |
| 111 | /* |
| 112 | * Synchronize against folio_lock_anon_vma_read() such that |
| 113 | * we can safely hold the lock without the anon_vma getting |
| 114 | * freed. |
| 115 | * |
| 116 | * Relies on the full mb implied by the atomic_dec_and_test() from |
| 117 | * put_anon_vma() against the acquire barrier implied by |
| 118 | * down_read_trylock() from folio_lock_anon_vma_read(). This orders: |
| 119 | * |
| 120 | * folio_lock_anon_vma_read() VS put_anon_vma() |
| 121 | * down_read_trylock() atomic_dec_and_test() |
| 122 | * LOCK MB |
| 123 | * atomic_read() rwsem_is_locked() |
| 124 | * |
| 125 | * LOCK should suffice since the actual taking of the lock must |
| 126 | * happen _before_ what follows. |
| 127 | */ |
| 128 | might_sleep(); |
| 129 | if (rwsem_is_locked(&anon_vma->root->rwsem)) { |
| 130 | anon_vma_lock_write(anon_vma); |
| 131 | anon_vma_unlock_write(anon_vma); |
| 132 | } |
| 133 | |
| 134 | kmem_cache_free(anon_vma_cachep, anon_vma); |
| 135 | } |
| 136 | |
| 137 | static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) |
| 138 | { |
| 139 | return kmem_cache_alloc(anon_vma_chain_cachep, gfp); |
| 140 | } |
| 141 | |
| 142 | static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) |
| 143 | { |
| 144 | kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); |
| 145 | } |
| 146 | |
| 147 | static void anon_vma_chain_link(struct vm_area_struct *vma, |
| 148 | struct anon_vma_chain *avc, |
| 149 | struct anon_vma *anon_vma) |
| 150 | { |
| 151 | avc->vma = vma; |
| 152 | avc->anon_vma = anon_vma; |
| 153 | list_add(&avc->same_vma, &vma->anon_vma_chain); |
| 154 | anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); |
| 155 | } |
| 156 | |
| 157 | /** |
| 158 | * __anon_vma_prepare - attach an anon_vma to a memory region |
| 159 | * @vma: the memory region in question |
| 160 | * |
| 161 | * This makes sure the memory mapping described by 'vma' has |
| 162 | * an 'anon_vma' attached to it, so that we can associate the |
| 163 | * anonymous pages mapped into it with that anon_vma. |
| 164 | * |
| 165 | * The common case will be that we already have one, which |
| 166 | * is handled inline by anon_vma_prepare(). But if |
| 167 | * not we either need to find an adjacent mapping that we |
| 168 | * can re-use the anon_vma from (very common when the only |
| 169 | * reason for splitting a vma has been mprotect()), or we |
| 170 | * allocate a new one. |
| 171 | * |
| 172 | * Anon-vma allocations are very subtle, because we may have |
| 173 | * optimistically looked up an anon_vma in folio_lock_anon_vma_read() |
| 174 | * and that may actually touch the rwsem even in the newly |
| 175 | * allocated vma (it depends on RCU to make sure that the |
| 176 | * anon_vma isn't actually destroyed). |
| 177 | * |
| 178 | * As a result, we need to do proper anon_vma locking even |
| 179 | * for the new allocation. At the same time, we do not want |
| 180 | * to do any locking for the common case of already having |
| 181 | * an anon_vma. |
| 182 | * |
| 183 | * This must be called with the mmap_lock held for reading. |
| 184 | */ |
| 185 | int __anon_vma_prepare(struct vm_area_struct *vma) |
| 186 | { |
| 187 | struct mm_struct *mm = vma->vm_mm; |
| 188 | struct anon_vma *anon_vma, *allocated; |
| 189 | struct anon_vma_chain *avc; |
| 190 | |
| 191 | might_sleep(); |
| 192 | |
| 193 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
| 194 | if (!avc) |
| 195 | goto out_enomem; |
| 196 | |
| 197 | anon_vma = find_mergeable_anon_vma(vma); |
| 198 | allocated = NULL; |
| 199 | if (!anon_vma) { |
| 200 | anon_vma = anon_vma_alloc(); |
| 201 | if (unlikely(!anon_vma)) |
| 202 | goto out_enomem_free_avc; |
| 203 | allocated = anon_vma; |
| 204 | } |
| 205 | |
| 206 | anon_vma_lock_write(anon_vma); |
| 207 | /* page_table_lock to protect against threads */ |
| 208 | spin_lock(&mm->page_table_lock); |
| 209 | if (likely(!vma->anon_vma)) { |
| 210 | vma->anon_vma = anon_vma; |
| 211 | anon_vma_chain_link(vma, avc, anon_vma); |
| 212 | /* vma reference or self-parent link for new root */ |
| 213 | anon_vma->degree++; |
| 214 | allocated = NULL; |
| 215 | avc = NULL; |
| 216 | } |
| 217 | spin_unlock(&mm->page_table_lock); |
| 218 | anon_vma_unlock_write(anon_vma); |
| 219 | |
| 220 | if (unlikely(allocated)) |
| 221 | put_anon_vma(allocated); |
| 222 | if (unlikely(avc)) |
| 223 | anon_vma_chain_free(avc); |
| 224 | |
| 225 | return 0; |
| 226 | |
| 227 | out_enomem_free_avc: |
| 228 | anon_vma_chain_free(avc); |
| 229 | out_enomem: |
| 230 | return -ENOMEM; |
| 231 | } |
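|  | |
|  | /* |
|  | * Sketch of the typical caller pattern (simplified from the anonymous-fault |
|  | * path, not a verbatim copy of any one call site): the inline |
|  | * anon_vma_prepare() wrapper in <linux/rmap.h> checks vma->anon_vma first |
|  | * and only drops into __anon_vma_prepare() above when it is still NULL. |
|  | * |
|  | *	if (unlikely(anon_vma_prepare(vma))) |
|  | *		return VM_FAULT_OOM; |
|  | *	page = alloc_zeroed_user_highpage_movable(vma, address); |
|  | *	... |
|  | *	page_add_new_anon_rmap(page, vma, address, false); |
|  | */ |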
| 232 | |
| 233 | /* |
| 234 | * This is a useful helper function for locking the anon_vma root as |
| 235 | * we traverse the vma->anon_vma_chain, looping over the anon_vmas |
| 236 | * attached to the same vma. |
| 237 | * |
| 238 | * Such anon_vmas should all have the same root, so you'd expect to see |
| 239 | * just a single lock acquisition for the whole traversal. |
| 240 | */ |
| 241 | static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) |
| 242 | { |
| 243 | struct anon_vma *new_root = anon_vma->root; |
| 244 | if (new_root != root) { |
| 245 | if (WARN_ON_ONCE(root)) |
| 246 | up_write(&root->rwsem); |
| 247 | root = new_root; |
| 248 | down_write(&root->rwsem); |
| 249 | } |
| 250 | return root; |
| 251 | } |
| 252 | |
| 253 | static inline void unlock_anon_vma_root(struct anon_vma *root) |
| 254 | { |
| 255 | if (root) |
| 256 | up_write(&root->rwsem); |
| 257 | } |
| 258 | |
| 259 | /* |
| 260 | * Attach the anon_vmas from src to dst. |
| 261 | * Returns 0 on success, -ENOMEM on failure. |
| 262 | * |
| 263 | * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and |
| 264 | * anon_vma_fork(). The first three want an exact copy of src, while the last |
| 265 | * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent |
| 266 | * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the call, |
| 267 | * we can identify this case by checking (!dst->anon_vma && src->anon_vma). |
| 268 | * |
| 269 | * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find |
| 270 | * and reuse existing anon_vma which has no vmas and only one child anon_vma. |
| 271 | * This prevents the anon_vma hierarchy from degrading into an endless linear chain |
| 272 | * in the case of a constantly forking task. On the other hand, an anon_vma with more |
| 273 | * than one child isn't reused even if there is no live vma, so the rmap |
| 274 | * walker has a good chance of avoiding a scan of the whole hierarchy when it |
| 275 | * searches for where a page is mapped. |
| 276 | */ |
| 277 | int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) |
| 278 | { |
| 279 | struct anon_vma_chain *avc, *pavc; |
| 280 | struct anon_vma *root = NULL; |
| 281 | |
| 282 | list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { |
| 283 | struct anon_vma *anon_vma; |
| 284 | |
| 285 | avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); |
| 286 | if (unlikely(!avc)) { |
| 287 | unlock_anon_vma_root(root); |
| 288 | root = NULL; |
| 289 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
| 290 | if (!avc) |
| 291 | goto enomem_failure; |
| 292 | } |
| 293 | anon_vma = pavc->anon_vma; |
| 294 | root = lock_anon_vma_root(root, anon_vma); |
| 295 | anon_vma_chain_link(dst, avc, anon_vma); |
| 296 | |
| 297 | /* |
| 298 | * Reuse the existing anon_vma if its degree is lower than two, |
| 299 | * which means it has no vma and only one anon_vma child. |
| 300 | * |
| 301 | * Do not choose the parent anon_vma, otherwise the first child |
| 302 | * will always reuse it. The root anon_vma is never reused: |
| 303 | * it has a self-parent reference and at least one child. |
| 304 | */ |
| 305 | if (!dst->anon_vma && src->anon_vma && |
| 306 | anon_vma != src->anon_vma && anon_vma->degree < 2) |
| 307 | dst->anon_vma = anon_vma; |
| 308 | } |
| 309 | if (dst->anon_vma) |
| 310 | dst->anon_vma->degree++; |
| 311 | unlock_anon_vma_root(root); |
| 312 | return 0; |
| 313 | |
| 314 | enomem_failure: |
| 315 | /* |
| 316 | * dst->anon_vma is dropped here otherwise its degree can be incorrectly |
| 317 | * decremented in unlink_anon_vmas(). |
| 318 | * We can safely do this because callers of anon_vma_clone() don't care |
| 319 | * about dst->anon_vma if anon_vma_clone() failed. |
| 320 | */ |
| 321 | dst->anon_vma = NULL; |
| 322 | unlink_anon_vmas(dst); |
| 323 | return -ENOMEM; |
| 324 | } |
| 325 | |
| 326 | /* |
| 327 | * Attach vma to its own anon_vma, as well as to the anon_vmas that |
| 328 | * the corresponding VMA in the parent process is attached to. |
| 329 | * Returns 0 on success, non-zero on failure. |
| 330 | */ |
| 331 | int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) |
| 332 | { |
| 333 | struct anon_vma_chain *avc; |
| 334 | struct anon_vma *anon_vma; |
| 335 | int error; |
| 336 | |
| 337 | /* Don't bother if the parent process has no anon_vma here. */ |
| 338 | if (!pvma->anon_vma) |
| 339 | return 0; |
| 340 | |
| 341 | /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ |
| 342 | vma->anon_vma = NULL; |
| 343 | |
| 344 | /* |
| 345 | * First, attach the new VMA to the parent VMA's anon_vmas, |
| 346 | * so rmap can find non-COWed pages in child processes. |
| 347 | */ |
| 348 | error = anon_vma_clone(vma, pvma); |
| 349 | if (error) |
| 350 | return error; |
| 351 | |
| 352 | /* An existing anon_vma has been reused, all done then. */ |
| 353 | if (vma->anon_vma) |
| 354 | return 0; |
| 355 | |
| 356 | /* Then add our own anon_vma. */ |
| 357 | anon_vma = anon_vma_alloc(); |
| 358 | if (!anon_vma) |
| 359 | goto out_error; |
| 360 | avc = anon_vma_chain_alloc(GFP_KERNEL); |
| 361 | if (!avc) |
| 362 | goto out_error_free_anon_vma; |
| 363 | |
| 364 | /* |
| 365 | * The root anon_vma's rwsem is the lock actually used when we |
| 366 | * lock any of the anon_vmas in this anon_vma tree. |
| 367 | */ |
| 368 | anon_vma->root = pvma->anon_vma->root; |
| 369 | anon_vma->parent = pvma->anon_vma; |
| 370 | /* |
| 371 | * With refcounts, an anon_vma can stay around longer than the |
| 372 | * process it belongs to. The root anon_vma needs to be pinned until |
| 373 | * this anon_vma is freed, because the lock lives in the root. |
| 374 | */ |
| 375 | get_anon_vma(anon_vma->root); |
| 376 | /* Mark this anon_vma as the one where our new (COWed) pages go. */ |
| 377 | vma->anon_vma = anon_vma; |
| 378 | anon_vma_lock_write(anon_vma); |
| 379 | anon_vma_chain_link(vma, avc, anon_vma); |
| 380 | anon_vma->parent->degree++; |
| 381 | anon_vma_unlock_write(anon_vma); |
| 382 | |
| 383 | return 0; |
| 384 | |
| 385 | out_error_free_anon_vma: |
| 386 | put_anon_vma(anon_vma); |
| 387 | out_error: |
| 388 | unlink_anon_vmas(vma); |
| 389 | return -ENOMEM; |
| 390 | } |
| 391 | |
| 392 | void unlink_anon_vmas(struct vm_area_struct *vma) |
| 393 | { |
| 394 | struct anon_vma_chain *avc, *next; |
| 395 | struct anon_vma *root = NULL; |
| 396 | |
| 397 | /* |
| 398 | * Unlink each anon_vma chained to the VMA. This list is ordered |
| 399 | * from newest to oldest, ensuring the root anon_vma gets freed last. |
| 400 | */ |
| 401 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
| 402 | struct anon_vma *anon_vma = avc->anon_vma; |
| 403 | |
| 404 | root = lock_anon_vma_root(root, anon_vma); |
| 405 | anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); |
| 406 | |
| 407 | /* |
| 408 | * Leave empty anon_vmas on the list - we'll need |
| 409 | * to free them outside the lock. |
| 410 | */ |
| 411 | if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { |
| 412 | anon_vma->parent->degree--; |
| 413 | continue; |
| 414 | } |
| 415 | |
| 416 | list_del(&avc->same_vma); |
| 417 | anon_vma_chain_free(avc); |
| 418 | } |
| 419 | if (vma->anon_vma) { |
| 420 | vma->anon_vma->degree--; |
| 421 | |
| 422 | /* |
| 423 | * The vma may still be needed after unlink, and a fresh anon_vma will be |
| 424 | * prepared when a fault is next handled. |
| 425 | */ |
| 426 | vma->anon_vma = NULL; |
| 427 | } |
| 428 | unlock_anon_vma_root(root); |
| 429 | |
| 430 | /* |
| 431 | * Iterate the list once more, it now only contains empty and unlinked |
| 432 | * anon_vmas, destroy them. Could not do before due to __put_anon_vma() |
| 433 | * needing to write-acquire the anon_vma->root->rwsem. |
| 434 | */ |
| 435 | list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { |
| 436 | struct anon_vma *anon_vma = avc->anon_vma; |
| 437 | |
| 438 | VM_WARN_ON(anon_vma->degree); |
| 439 | put_anon_vma(anon_vma); |
| 440 | |
| 441 | list_del(&avc->same_vma); |
| 442 | anon_vma_chain_free(avc); |
| 443 | } |
| 444 | } |
| 445 | |
| 446 | static void anon_vma_ctor(void *data) |
| 447 | { |
| 448 | struct anon_vma *anon_vma = data; |
| 449 | |
| 450 | init_rwsem(&anon_vma->rwsem); |
| 451 | atomic_set(&anon_vma->refcount, 0); |
| 452 | anon_vma->rb_root = RB_ROOT_CACHED; |
| 453 | } |
| 454 | |
| 455 | void __init anon_vma_init(void) |
| 456 | { |
| 457 | anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), |
| 458 | 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, |
| 459 | anon_vma_ctor); |
| 460 | anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, |
| 461 | SLAB_PANIC|SLAB_ACCOUNT); |
| 462 | } |
| 463 | |
| 464 | /* |
| 465 | * Getting a lock on a stable anon_vma from a page off the LRU is tricky! |
| 466 | * |
| 467 | * Since there is no serialization whatsoever against page_remove_rmap(), |
| 468 | * the best this function can do is return a refcount-increased anon_vma |
| 469 | * that might have been relevant to this page. |
| 470 | * |
| 471 | * The page might have been remapped to a different anon_vma or the anon_vma |
| 472 | * returned may already be freed (and even reused). |
| 473 | * |
| 474 | * In case it was remapped to a different anon_vma, the new anon_vma will be a |
| 475 | * child of the old anon_vma, and the anon_vma lifetime rules will therefore |
| 476 | * ensure that any anon_vma obtained from the page will still be valid for as |
| 477 | * long as we observe page_mapped() [ hence all those page_mapped() tests ]. |
| 478 | * |
| 479 | * All users of this function must be very careful when walking the anon_vma |
| 480 | * chain and verify that the page in question is indeed mapped in it |
| 481 | * [ something equivalent to page_mapped_in_vma() ]. |
| 482 | * |
| 483 | * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from |
| 484 | * page_remove_rmap() that the anon_vma pointer from page->mapping is valid |
| 485 | * if there is a mapcount, we can dereference the anon_vma after observing |
| 486 | * those. |
| 487 | */ |
| 488 | struct anon_vma *page_get_anon_vma(struct page *page) |
| 489 | { |
| 490 | struct anon_vma *anon_vma = NULL; |
| 491 | unsigned long anon_mapping; |
| 492 | |
| 493 | rcu_read_lock(); |
| 494 | anon_mapping = (unsigned long)READ_ONCE(page->mapping); |
| 495 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
| 496 | goto out; |
| 497 | if (!page_mapped(page)) |
| 498 | goto out; |
| 499 | |
| 500 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
| 501 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { |
| 502 | anon_vma = NULL; |
| 503 | goto out; |
| 504 | } |
| 505 | |
| 506 | /* |
| 507 | * If this page is still mapped, then its anon_vma cannot have been |
| 508 | * freed. But if it has been unmapped, we have no security against the |
| 509 | * anon_vma structure being freed and reused (for another anon_vma: |
| 510 | * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() |
| 511 | * above cannot corrupt). |
| 512 | */ |
| 513 | if (!page_mapped(page)) { |
| 514 | rcu_read_unlock(); |
| 515 | put_anon_vma(anon_vma); |
| 516 | return NULL; |
| 517 | } |
| 518 | out: |
| 519 | rcu_read_unlock(); |
| 520 | |
| 521 | return anon_vma; |
| 522 | } |
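|  | |
|  | /* |
|  | * Sketch of the expected usage (simplified from a page-migration style of |
|  | * caller, not a verbatim call site): take the pinned anon_vma, do the rmap |
|  | * work while re-checking page_mapped() as described above, then drop the |
|  | * reference. |
|  | * |
|  | *	anon_vma = page_get_anon_vma(page); |
|  | *	if (anon_vma) { |
|  | *		... walk the anon_vma interval tree, verifying the page |
|  | *		    is still mapped in each vma ... |
|  | *		put_anon_vma(anon_vma); |
|  | *	} |
|  | */ |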
| 523 | |
| 524 | /* |
| 525 | * Similar to page_get_anon_vma() except it locks the anon_vma. |
| 526 | * |
| 527 | * It's a little more complex as it tries to keep the fast path to a single |
| 528 | * atomic op -- the trylock. If we fail the trylock, we fall back to getting a |
| 529 | * reference like with page_get_anon_vma() and then block on the rwsem. |
| 530 | */ |
| 531 | struct anon_vma *folio_lock_anon_vma_read(struct folio *folio) |
| 532 | { |
| 533 | struct anon_vma *anon_vma = NULL; |
| 534 | struct anon_vma *root_anon_vma; |
| 535 | unsigned long anon_mapping; |
| 536 | |
| 537 | rcu_read_lock(); |
| 538 | anon_mapping = (unsigned long)READ_ONCE(folio->mapping); |
| 539 | if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) |
| 540 | goto out; |
| 541 | if (!folio_mapped(folio)) |
| 542 | goto out; |
| 543 | |
| 544 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
| 545 | root_anon_vma = READ_ONCE(anon_vma->root); |
| 546 | if (down_read_trylock(&root_anon_vma->rwsem)) { |
| 547 | /* |
| 548 | * If the folio is still mapped, then this anon_vma is still |
| 549 | * its anon_vma, and holding the rwsem ensures that it will |
| 550 | * not go away, see anon_vma_free(). |
| 551 | */ |
| 552 | if (!folio_mapped(folio)) { |
| 553 | up_read(&root_anon_vma->rwsem); |
| 554 | anon_vma = NULL; |
| 555 | } |
| 556 | goto out; |
| 557 | } |
| 558 | |
| 559 | /* trylock failed, we need to sleep */ |
| 560 | if (!atomic_inc_not_zero(&anon_vma->refcount)) { |
| 561 | anon_vma = NULL; |
| 562 | goto out; |
| 563 | } |
| 564 | |
| 565 | if (!folio_mapped(folio)) { |
| 566 | rcu_read_unlock(); |
| 567 | put_anon_vma(anon_vma); |
| 568 | return NULL; |
| 569 | } |
| 570 | |
| 571 | /* we pinned the anon_vma, it's safe to sleep */ |
| 572 | rcu_read_unlock(); |
| 573 | anon_vma_lock_read(anon_vma); |
| 574 | |
| 575 | if (atomic_dec_and_test(&anon_vma->refcount)) { |
| 576 | /* |
| 577 | * Oops, we held the last refcount, release the lock |
| 578 | * and bail -- can't simply use put_anon_vma() because |
| 579 | * we'll deadlock on the anon_vma_lock_write() recursion. |
| 580 | */ |
| 581 | anon_vma_unlock_read(anon_vma); |
| 582 | __put_anon_vma(anon_vma); |
| 583 | anon_vma = NULL; |
| 584 | } |
| 585 | |
| 586 | return anon_vma; |
| 587 | |
| 588 | out: |
| 589 | rcu_read_unlock(); |
| 590 | return anon_vma; |
| 591 | } |
| 592 | |
| 593 | void page_unlock_anon_vma_read(struct anon_vma *anon_vma) |
| 594 | { |
| 595 | anon_vma_unlock_read(anon_vma); |
| 596 | } |
| 597 | |
| 598 | #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH |
| 599 | /* |
| 600 | * Flush TLB entries for recently unmapped pages from remote CPUs. It is |
| 601 | * important that, if a PTE was dirty when it was unmapped, it is flushed |
| 602 | * before any IO is initiated on the page, to prevent lost writes. Similarly, |
| 603 | * it must be flushed before the page is freed, to prevent data leakage. |
| 604 | */ |
| 605 | void try_to_unmap_flush(void) |
| 606 | { |
| 607 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
| 608 | |
| 609 | if (!tlb_ubc->flush_required) |
| 610 | return; |
| 611 | |
| 612 | arch_tlbbatch_flush(&tlb_ubc->arch); |
| 613 | tlb_ubc->flush_required = false; |
| 614 | tlb_ubc->writable = false; |
| 615 | } |
| 616 | |
| 617 | /* Flush iff there are potentially writable TLB entries that can race with IO */ |
| 618 | void try_to_unmap_flush_dirty(void) |
| 619 | { |
| 620 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
| 621 | |
| 622 | if (tlb_ubc->writable) |
| 623 | try_to_unmap_flush(); |
| 624 | } |
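|  | |
|  | /* |
|  | * Sketch of how the batching is driven (simplified from the reclaim path; |
|  | * details differ per kernel version): folios are unmapped with |
|  | * TTU_BATCH_FLUSH so set_tlb_ubc_flush_pending() records the mm, and the |
|  | * accumulated flush is issued once per batch. |
|  | * |
|  | *	try_to_unmap(folio, flags | TTU_BATCH_FLUSH); |
|  | *	... |
|  | *	try_to_unmap_flush_dirty();	// before starting writeback on a folio |
|  | *	... |
|  | *	try_to_unmap_flush();		// before the batch of pages is freed |
|  | */ |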
| 625 | |
| 626 | /* |
| 627 | * Bits 0-14 of mm->tlb_flush_batched record pending generations. |
| 628 | * Bits 16-30 of mm->tlb_flush_batched record flushed generations. |
| 629 | */ |
| 630 | #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 |
| 631 | #define TLB_FLUSH_BATCH_PENDING_MASK \ |
| 632 | ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) |
| 633 | #define TLB_FLUSH_BATCH_PENDING_LARGE \ |
| 634 | (TLB_FLUSH_BATCH_PENDING_MASK / 2) |
| 635 | |
| 636 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) |
| 637 | { |
| 638 | struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; |
| 639 | int batch, nbatch; |
| 640 | |
| 641 | arch_tlbbatch_add_mm(&tlb_ubc->arch, mm); |
| 642 | tlb_ubc->flush_required = true; |
| 643 | |
| 644 | /* |
| 645 | * Ensure compiler does not re-order the setting of tlb_flush_batched |
| 646 | * before the PTE is cleared. |
| 647 | */ |
| 648 | barrier(); |
| 649 | batch = atomic_read(&mm->tlb_flush_batched); |
| 650 | retry: |
| 651 | if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { |
| 652 | /* |
| 653 | * Prevent `pending' from catching up with `flushed' because of |
| 654 | * overflow. Reset `pending' and `flushed' to be 1 and 0 if |
| 655 | * `pending' becomes large. |
| 656 | */ |
| 657 | nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1); |
| 658 | if (nbatch != batch) { |
| 659 | batch = nbatch; |
| 660 | goto retry; |
| 661 | } |
| 662 | } else { |
| 663 | atomic_inc(&mm->tlb_flush_batched); |
| 664 | } |
| 665 | |
| 666 | /* |
| 667 | * If the PTE was dirty then it's best to assume it's writable. The |
| 668 | * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() |
| 669 | * before the page is queued for IO. |
| 670 | */ |
| 671 | if (writable) |
| 672 | tlb_ubc->writable = true; |
| 673 | } |
| 674 | |
| 675 | /* |
| 676 | * Returns true if the TLB flush should be deferred to the end of a batch of |
| 677 | * unmap operations to reduce IPIs. |
| 678 | */ |
| 679 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) |
| 680 | { |
| 681 | bool should_defer = false; |
| 682 | |
| 683 | if (!(flags & TTU_BATCH_FLUSH)) |
| 684 | return false; |
| 685 | |
| 686 | /* If remote CPUs need to be flushed then defer the flush so it can be batched */ |
| 687 | if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) |
| 688 | should_defer = true; |
| 689 | put_cpu(); |
| 690 | |
| 691 | return should_defer; |
| 692 | } |
| 693 | |
| 694 | /* |
| 695 | * Reclaim unmaps pages under the PTL but does not flush the TLB prior to |
| 696 | * releasing the PTL if TLB flushes are batched. It's possible for a parallel |
| 697 | * operation such as mprotect or munmap to race between reclaim unmapping |
| 698 | * the page and flushing the page. If this race occurs, it potentially allows |
| 699 | * access to data via a stale TLB entry. Tracking all mms that have TLB |
| 700 | * batching in flight would be expensive during reclaim, so instead track |
| 701 | * whether TLB batching occurred in the past and, if so, do a flush here |
| 702 | * if required. This costs one additional flush per reclaim cycle, paid |
| 703 | * by the first operation at risk such as mprotect or munmap. |
| 704 | * |
| 705 | * This must be called under the PTL so that an access to tlb_flush_batched |
| 706 | * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise |
| 707 | * via the PTL. |
| 708 | */ |
| 709 | void flush_tlb_batched_pending(struct mm_struct *mm) |
| 710 | { |
| 711 | int batch = atomic_read(&mm->tlb_flush_batched); |
| 712 | int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; |
| 713 | int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; |
| 714 | |
| 715 | if (pending != flushed) { |
| 716 | flush_tlb_mm(mm); |
| 717 | /* |
| 718 | * If a new TLB flush became pending while we were flushing, leave |
| 719 | * mm->tlb_flush_batched as is, so that the new flush is not lost. |
| 720 | */ |
| 721 | atomic_cmpxchg(&mm->tlb_flush_batched, batch, |
| 722 | pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); |
| 723 | } |
| 724 | } |
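|  | |
|  | /* |
|  | * Sketch of the expected caller pattern (an assumption modelled on the |
|  | * mprotect/munmap style PTE walkers, not a verbatim call site): the flush |
|  | * happens after taking the PTL and before old PTEs are read or modified, so |
|  | * any reclaim-batched, unflushed TLB entries for this mm are gone first. |
|  | * |
|  | *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl); |
|  | *	flush_tlb_batched_pending(mm); |
|  | *	... read/modify the PTEs ... |
|  | *	pte_unmap_unlock(pte, ptl); |
|  | */ |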
| 725 | #else |
| 726 | static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) |
| 727 | { |
| 728 | } |
| 729 | |
| 730 | static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) |
| 731 | { |
| 732 | return false; |
| 733 | } |
| 734 | #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ |
| 735 | |
| 736 | /* |
| 737 | * At what user virtual address is page expected in vma? |
| 738 | * Caller should check the page is actually part of the vma. |
| 739 | */ |
| 740 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
| 741 | { |
| 742 | struct folio *folio = page_folio(page); |
| 743 | if (folio_test_anon(folio)) { |
| 744 | struct anon_vma *page__anon_vma = folio_anon_vma(folio); |
| 745 | /* |
| 746 | * Note: swapoff's unuse_vma() is more efficient with this |
| 747 | * check, and needs it to match anon_vma when KSM is active. |
| 748 | */ |
| 749 | if (!vma->anon_vma || !page__anon_vma || |
| 750 | vma->anon_vma->root != page__anon_vma->root) |
| 751 | return -EFAULT; |
| 752 | } else if (!vma->vm_file) { |
| 753 | return -EFAULT; |
| 754 | } else if (vma->vm_file->f_mapping != folio->mapping) { |
| 755 | return -EFAULT; |
| 756 | } |
| 757 | |
| 758 | return vma_address(page, vma); |
| 759 | } |
| 760 | |
| 761 | pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) |
| 762 | { |
| 763 | pgd_t *pgd; |
| 764 | p4d_t *p4d; |
| 765 | pud_t *pud; |
| 766 | pmd_t *pmd = NULL; |
| 767 | pmd_t pmde; |
| 768 | |
| 769 | pgd = pgd_offset(mm, address); |
| 770 | if (!pgd_present(*pgd)) |
| 771 | goto out; |
| 772 | |
| 773 | p4d = p4d_offset(pgd, address); |
| 774 | if (!p4d_present(*p4d)) |
| 775 | goto out; |
| 776 | |
| 777 | pud = pud_offset(p4d, address); |
| 778 | if (!pud_present(*pud)) |
| 779 | goto out; |
| 780 | |
| 781 | pmd = pmd_offset(pud, address); |
| 782 | /* |
| 783 | * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() |
| 784 | * without holding anon_vma lock for write. So when looking for a |
| 785 | * genuine pmde (in which to find pte), test present and !THP together. |
| 786 | */ |
| 787 | pmde = *pmd; |
| 788 | barrier(); |
| 789 | if (!pmd_present(pmde) || pmd_trans_huge(pmde)) |
| 790 | pmd = NULL; |
| 791 | out: |
| 792 | return pmd; |
| 793 | } |
| 794 | |
| 795 | struct folio_referenced_arg { |
| 796 | int mapcount; |
| 797 | int referenced; |
| 798 | unsigned long vm_flags; |
| 799 | struct mem_cgroup *memcg; |
| 800 | }; |
| 801 | /* |
| 802 | * arg: folio_referenced_arg will be passed |
| 803 | */ |
| 804 | static bool folio_referenced_one(struct folio *folio, |
| 805 | struct vm_area_struct *vma, unsigned long address, void *arg) |
| 806 | { |
| 807 | struct folio_referenced_arg *pra = arg; |
| 808 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
| 809 | int referenced = 0; |
| 810 | |
| 811 | while (page_vma_mapped_walk(&pvmw)) { |
| 812 | address = pvmw.address; |
| 813 | |
| 814 | if ((vma->vm_flags & VM_LOCKED) && |
| 815 | (!folio_test_large(folio) || !pvmw.pte)) { |
| 816 | /* Restore the mlock which got missed */ |
| 817 | mlock_vma_folio(folio, vma, !pvmw.pte); |
| 818 | page_vma_mapped_walk_done(&pvmw); |
| 819 | pra->vm_flags |= VM_LOCKED; |
| 820 | return false; /* To break the loop */ |
| 821 | } |
| 822 | |
| 823 | if (pvmw.pte) { |
| 824 | if (ptep_clear_flush_young_notify(vma, address, |
| 825 | pvmw.pte)) { |
| 826 | /* |
| 827 | * Don't treat a reference through |
| 828 | * a sequentially read mapping as such. |
| 829 | * If the folio has been used in another mapping, |
| 830 | * we will catch it; if this other mapping is |
| 831 | * already gone, the unmap path will have set |
| 832 | * the referenced flag or activated the folio. |
| 833 | */ |
| 834 | if (likely(!(vma->vm_flags & VM_SEQ_READ))) |
| 835 | referenced++; |
| 836 | } |
| 837 | } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { |
| 838 | if (pmdp_clear_flush_young_notify(vma, address, |
| 839 | pvmw.pmd)) |
| 840 | referenced++; |
| 841 | } else { |
| 842 | /* unexpected pmd-mapped folio? */ |
| 843 | WARN_ON_ONCE(1); |
| 844 | } |
| 845 | |
| 846 | pra->mapcount--; |
| 847 | } |
| 848 | |
| 849 | if (referenced) |
| 850 | folio_clear_idle(folio); |
| 851 | if (folio_test_clear_young(folio)) |
| 852 | referenced++; |
| 853 | |
| 854 | if (referenced) { |
| 855 | pra->referenced++; |
| 856 | pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; |
| 857 | } |
| 858 | |
| 859 | if (!pra->mapcount) |
| 860 | return false; /* To break the loop */ |
| 861 | |
| 862 | return true; |
| 863 | } |
| 864 | |
| 865 | static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) |
| 866 | { |
| 867 | struct folio_referenced_arg *pra = arg; |
| 868 | struct mem_cgroup *memcg = pra->memcg; |
| 869 | |
| 870 | if (!mm_match_cgroup(vma->vm_mm, memcg)) |
| 871 | return true; |
| 872 | |
| 873 | return false; |
| 874 | } |
| 875 | |
| 876 | /** |
| 877 | * folio_referenced() - Test if the folio was referenced. |
| 878 | * @folio: The folio to test. |
| 879 | * @is_locked: Caller holds lock on the folio. |
| 880 | * @memcg: target memory cgroup |
| 881 | * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. |
| 882 | * |
| 883 | * Quick test_and_clear_referenced for all mappings of a folio. |
| 884 | * |
| 885 | * Return: The number of mappings which referenced the folio. |
| 886 | */ |
| 887 | int folio_referenced(struct folio *folio, int is_locked, |
| 888 | struct mem_cgroup *memcg, unsigned long *vm_flags) |
| 889 | { |
| 890 | int we_locked = 0; |
| 891 | struct folio_referenced_arg pra = { |
| 892 | .mapcount = folio_mapcount(folio), |
| 893 | .memcg = memcg, |
| 894 | }; |
| 895 | struct rmap_walk_control rwc = { |
| 896 | .rmap_one = folio_referenced_one, |
| 897 | .arg = (void *)&pra, |
| 898 | .anon_lock = folio_lock_anon_vma_read, |
| 899 | }; |
| 900 | |
| 901 | *vm_flags = 0; |
| 902 | if (!pra.mapcount) |
| 903 | return 0; |
| 904 | |
| 905 | if (!folio_raw_mapping(folio)) |
| 906 | return 0; |
| 907 | |
| 908 | if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { |
| 909 | we_locked = folio_trylock(folio); |
| 910 | if (!we_locked) |
| 911 | return 1; |
| 912 | } |
| 913 | |
| 914 | /* |
| 915 | * If we are reclaiming on behalf of a cgroup, skip |
| 916 | * counting on behalf of references from different |
| 917 | * cgroups |
| 918 | */ |
| 919 | if (memcg) { |
| 920 | rwc.invalid_vma = invalid_folio_referenced_vma; |
| 921 | } |
| 922 | |
| 923 | rmap_walk(folio, &rwc); |
| 924 | *vm_flags = pra.vm_flags; |
| 925 | |
| 926 | if (we_locked) |
| 927 | folio_unlock(folio); |
| 928 | |
| 929 | return pra.referenced; |
| 930 | } |
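|  | |
|  | /* |
|  | * Illustrative use (a sketch modelled on reclaim's reference checks, not a |
|  | * verbatim call site): the caller combines the returned count with the |
|  | * accumulated vm_flags to decide what to do with the folio. |
|  | * |
|  | *	unsigned long vm_flags; |
|  | *	int referenced = folio_referenced(folio, 1, memcg, &vm_flags); |
|  | * |
|  | *	if (vm_flags & VM_LOCKED) |
|  | *		... never reclaim, let mlock handling deal with it ... |
|  | *	else if (referenced) |
|  | *		... treat the folio as recently used ... |
|  | */ |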
| 931 | |
| 932 | static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) |
| 933 | { |
| 934 | int cleaned = 0; |
| 935 | struct vm_area_struct *vma = pvmw->vma; |
| 936 | struct mmu_notifier_range range; |
| 937 | unsigned long address = pvmw->address; |
| 938 | |
| 939 | /* |
| 940 | * We have to assume the worst case, i.e. pmd, for invalidation. Note that |
| 941 | * the folio cannot be freed from this function. |
| 942 | */ |
| 943 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, |
| 944 | 0, vma, vma->vm_mm, address, |
| 945 | vma_address_end(pvmw)); |
| 946 | mmu_notifier_invalidate_range_start(&range); |
| 947 | |
| 948 | while (page_vma_mapped_walk(pvmw)) { |
| 949 | int ret = 0; |
| 950 | |
| 951 | address = pvmw->address; |
| 952 | if (pvmw->pte) { |
| 953 | pte_t entry; |
| 954 | pte_t *pte = pvmw->pte; |
| 955 | |
| 956 | if (!pte_dirty(*pte) && !pte_write(*pte)) |
| 957 | continue; |
| 958 | |
| 959 | flush_cache_page(vma, address, pte_pfn(*pte)); |
| 960 | entry = ptep_clear_flush(vma, address, pte); |
| 961 | entry = pte_wrprotect(entry); |
| 962 | entry = pte_mkclean(entry); |
| 963 | set_pte_at(vma->vm_mm, address, pte, entry); |
| 964 | ret = 1; |
| 965 | } else { |
| 966 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 967 | pmd_t *pmd = pvmw->pmd; |
| 968 | pmd_t entry; |
| 969 | |
| 970 | if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) |
| 971 | continue; |
| 972 | |
| 973 | flush_cache_range(vma, address, |
| 974 | address + HPAGE_PMD_SIZE); |
| 975 | entry = pmdp_invalidate(vma, address, pmd); |
| 976 | entry = pmd_wrprotect(entry); |
| 977 | entry = pmd_mkclean(entry); |
| 978 | set_pmd_at(vma->vm_mm, address, pmd, entry); |
| 979 | ret = 1; |
| 980 | #else |
| 981 | /* unexpected pmd-mapped folio? */ |
| 982 | WARN_ON_ONCE(1); |
| 983 | #endif |
| 984 | } |
| 985 | |
| 986 | /* |
| 987 | * No need to call mmu_notifier_invalidate_range() as we are |
| 988 | * downgrading page table protection not changing it to point |
| 989 | * to a new page. |
| 990 | * |
| 991 | * See Documentation/vm/mmu_notifier.rst |
| 992 | */ |
| 993 | if (ret) |
| 994 | cleaned++; |
| 995 | } |
| 996 | |
| 997 | mmu_notifier_invalidate_range_end(&range); |
| 998 | |
| 999 | return cleaned; |
| 1000 | } |
| 1001 | |
| 1002 | static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, |
| 1003 | unsigned long address, void *arg) |
| 1004 | { |
| 1005 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); |
| 1006 | int *cleaned = arg; |
| 1007 | |
| 1008 | *cleaned += page_vma_mkclean_one(&pvmw); |
| 1009 | |
| 1010 | return true; |
| 1011 | } |
| 1012 | |
| 1013 | static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) |
| 1014 | { |
| 1015 | if (vma->vm_flags & VM_SHARED) |
| 1016 | return false; |
| 1017 | |
| 1018 | return true; |
| 1019 | } |
| 1020 | |
| 1021 | int folio_mkclean(struct folio *folio) |
| 1022 | { |
| 1023 | int cleaned = 0; |
| 1024 | struct address_space *mapping; |
| 1025 | struct rmap_walk_control rwc = { |
| 1026 | .arg = (void *)&cleaned, |
| 1027 | .rmap_one = page_mkclean_one, |
| 1028 | .invalid_vma = invalid_mkclean_vma, |
| 1029 | }; |
| 1030 | |
| 1031 | BUG_ON(!folio_test_locked(folio)); |
| 1032 | |
| 1033 | if (!folio_mapped(folio)) |
| 1034 | return 0; |
| 1035 | |
| 1036 | mapping = folio_mapping(folio); |
| 1037 | if (!mapping) |
| 1038 | return 0; |
| 1039 | |
| 1040 | rmap_walk(folio, &rwc); |
| 1041 | |
| 1042 | return cleaned; |
| 1043 | } |
| 1044 | EXPORT_SYMBOL_GPL(folio_mkclean); |
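|  | |
|  | /* |
|  | * Sketch of the intended use (modelled on the writeback path, simplified): |
|  | * before a locked, dirty pagecache folio is written out, its PTEs are |
|  | * cleaned and write-protected; if any were dirty, the folio is re-marked |
|  | * dirty so the data still reaches disk. |
|  | * |
|  | *	// folio is locked by the caller, as folio_mkclean() requires |
|  | *	if (folio_mkclean(folio)) |
|  | *		folio_mark_dirty(folio); |
|  | */ |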
| 1045 | |
| 1046 | /** |
| 1047 | * pfn_mkclean_range - Clean the PTEs (including PMDs) mapping the range of |
| 1048 | * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) |
| 1049 | * within the @vma of shared mappings. Since clean PTEs |
| 1050 | * should also be read-only, write-protect them too. |
| 1051 | * @pfn: start pfn. |
| 1052 | * @nr_pages: number of physically contiguous pages starting with @pfn. |
| 1053 | * @pgoff: page offset that @pfn is mapped with. |
| 1054 | * @vma: vma that @pfn is mapped within. |
| 1055 | * |
| 1056 | * Returns the number of cleaned PTEs (including PMDs). |
| 1057 | */ |
| 1058 | int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, |
| 1059 | struct vm_area_struct *vma) |
| 1060 | { |
| 1061 | struct page_vma_mapped_walk pvmw = { |
| 1062 | .pfn = pfn, |
| 1063 | .nr_pages = nr_pages, |
| 1064 | .pgoff = pgoff, |
| 1065 | .vma = vma, |
| 1066 | .flags = PVMW_SYNC, |
| 1067 | }; |
| 1068 | |
| 1069 | if (invalid_mkclean_vma(vma, NULL)) |
| 1070 | return 0; |
| 1071 | |
| 1072 | pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); |
| 1073 | VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); |
| 1074 | |
| 1075 | return page_vma_mkclean_one(&pvmw); |
| 1076 | } |
| 1077 | |
| 1078 | /** |
| 1079 | * page_move_anon_rmap - move a page to our anon_vma |
| 1080 | * @page: the page to move to our anon_vma |
| 1081 | * @vma: the vma the page belongs to |
| 1082 | * |
| 1083 | * When a page belongs exclusively to one process after a COW event, |
| 1084 | * that page can be moved into the anon_vma that belongs to just that |
| 1085 | * process, so the rmap code will not search the parent or sibling |
| 1086 | * processes. |
| 1087 | */ |
| 1088 | void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) |
| 1089 | { |
| 1090 | struct anon_vma *anon_vma = vma->anon_vma; |
| 1091 | |
| 1092 | page = compound_head(page); |
| 1093 | |
| 1094 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 1095 | VM_BUG_ON_VMA(!anon_vma, vma); |
| 1096 | |
| 1097 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
| 1098 | /* |
| 1099 | * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written |
| 1100 | * simultaneously, so a concurrent reader (e.g. folio_referenced()'s |
| 1101 | * folio_test_anon()) will not see one without the other. |
| 1102 | */ |
| 1103 | WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); |
| 1104 | } |
| 1105 | |
| 1106 | /** |
| 1107 | * __page_set_anon_rmap - set up new anonymous rmap |
| 1108 | * @page: Page or Hugepage to add to rmap |
| 1109 | * @vma: VM area to add page to. |
| 1110 | * @address: User virtual address of the mapping |
| 1111 | * @exclusive: the page is exclusively owned by the current process |
| 1112 | */ |
| 1113 | static void __page_set_anon_rmap(struct page *page, |
| 1114 | struct vm_area_struct *vma, unsigned long address, int exclusive) |
| 1115 | { |
| 1116 | struct anon_vma *anon_vma = vma->anon_vma; |
| 1117 | |
| 1118 | BUG_ON(!anon_vma); |
| 1119 | |
| 1120 | if (PageAnon(page)) |
| 1121 | return; |
| 1122 | |
| 1123 | /* |
| 1124 | * If the page isn't exclusively mapped into this vma, |
| 1125 | * we must use the _oldest_ possible anon_vma for the |
| 1126 | * page mapping! |
| 1127 | */ |
| 1128 | if (!exclusive) |
| 1129 | anon_vma = anon_vma->root; |
| 1130 | |
| 1131 | /* |
| 1132 | * page_idle does a lockless/optimistic rmap scan on page->mapping. |
| 1133 | * Make sure the compiler doesn't split the stores of anon_vma and |
| 1134 | * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code |
| 1135 | * could mistake the mapping for a struct address_space and crash. |
| 1136 | */ |
| 1137 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
| 1138 | WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); |
| 1139 | page->index = linear_page_index(vma, address); |
| 1140 | } |
| 1141 | |
| 1142 | /** |
| 1143 | * __page_check_anon_rmap - sanity check anonymous rmap addition |
| 1144 | * @page: the page to add the mapping to |
| 1145 | * @vma: the vm area in which the mapping is added |
| 1146 | * @address: the user virtual address mapped |
| 1147 | */ |
| 1148 | static void __page_check_anon_rmap(struct page *page, |
| 1149 | struct vm_area_struct *vma, unsigned long address) |
| 1150 | { |
| 1151 | struct folio *folio = page_folio(page); |
| 1152 | /* |
| 1153 | * The page's anon-rmap details (mapping and index) are guaranteed to |
| 1154 | * be set up correctly at this point. |
| 1155 | * |
| 1156 | * We have exclusion against page_add_anon_rmap because the caller |
| 1157 | * always holds the page locked. |
| 1158 | * |
| 1159 | * We have exclusion against page_add_new_anon_rmap because those pages |
| 1160 | * are initially only visible via the pagetables, and the pte is locked |
| 1161 | * over the call to page_add_new_anon_rmap. |
| 1162 | */ |
| 1163 | VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, |
| 1164 | folio); |
| 1165 | VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), |
| 1166 | page); |
| 1167 | } |
| 1168 | |
| 1169 | /** |
| 1170 | * page_add_anon_rmap - add pte mapping to an anonymous page |
| 1171 | * @page: the page to add the mapping to |
| 1172 | * @vma: the vm area in which the mapping is added |
| 1173 | * @address: the user virtual address mapped |
| 1174 | * @flags: the rmap flags |
| 1175 | * |
| 1176 | * The caller needs to hold the pte lock, and the page must be locked in |
| 1177 | * the anon_vma case: to serialize mapping,index checking after setting, |
| 1178 | * and to ensure that PageAnon is not being upgraded racily to PageKsm |
| 1179 | * (but PageKsm is never downgraded to PageAnon). |
| 1180 | */ |
| 1181 | void page_add_anon_rmap(struct page *page, |
| 1182 | struct vm_area_struct *vma, unsigned long address, rmap_t flags) |
| 1183 | { |
| 1184 | bool compound = flags & RMAP_COMPOUND; |
| 1185 | bool first; |
| 1186 | |
| 1187 | if (unlikely(PageKsm(page))) |
| 1188 | lock_page_memcg(page); |
| 1189 | else |
| 1190 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 1191 | |
| 1192 | if (compound) { |
| 1193 | atomic_t *mapcount; |
| 1194 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 1195 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
| 1196 | mapcount = compound_mapcount_ptr(page); |
| 1197 | first = atomic_inc_and_test(mapcount); |
| 1198 | } else { |
| 1199 | first = atomic_inc_and_test(&page->_mapcount); |
| 1200 | } |
| 1201 | |
| 1202 | if (first) { |
| 1203 | int nr = compound ? thp_nr_pages(page) : 1; |
| 1204 | /* |
| 1205 | * We use the irq-unsafe __{inc|mod}_zone_page_stat because |
| 1206 | * these counters are not modified in interrupt context, and |
| 1207 | * pte lock (a spinlock) is held, which implies preemption |
| 1208 | * disabled. |
| 1209 | */ |
| 1210 | if (compound) |
| 1211 | __mod_lruvec_page_state(page, NR_ANON_THPS, nr); |
| 1212 | __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); |
| 1213 | } |
| 1214 | |
| 1215 | if (unlikely(PageKsm(page))) |
| 1216 | unlock_page_memcg(page); |
| 1217 | |
| 1218 | /* address might be in next vma when migration races vma_adjust */ |
| 1219 | else if (first) |
| 1220 | __page_set_anon_rmap(page, vma, address, |
| 1221 | !!(flags & RMAP_EXCLUSIVE)); |
| 1222 | else |
| 1223 | __page_check_anon_rmap(page, vma, address); |
| 1224 | |
| 1225 | mlock_vma_page(page, vma, compound); |
| 1226 | } |
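|  | |
|  | /* |
|  | * Sketch of a typical call site (simplified from a swap-in style fault |
|  | * handler, not a verbatim copy): the pte lock is held and the page is |
|  | * locked, so the mapping/index checks above are stable. RMAP_EXCLUSIVE is |
|  | * passed when the page is known to belong to this process alone. |
|  | * |
|  | *	page_add_anon_rmap(page, vma, vmf->address, |
|  | *			   exclusive ? RMAP_EXCLUSIVE : RMAP_NONE); |
|  | */ |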
| 1227 | |
| 1228 | /** |
| 1229 | * page_add_new_anon_rmap - add pte mapping to a new anonymous page |
| 1230 | * @page: the page to add the mapping to |
| 1231 | * @vma: the vm area in which the mapping is added |
| 1232 | * @address: the user virtual address mapped |
| 1233 | * @compound: charge the page as compound or small page |
| 1234 | * |
| 1235 | * Same as page_add_anon_rmap but must only be called on *new* pages. |
| 1236 | * This means the inc-and-test can be bypassed. |
| 1237 | * Page does not have to be locked. |
| 1238 | */ |
| 1239 | void page_add_new_anon_rmap(struct page *page, |
| 1240 | struct vm_area_struct *vma, unsigned long address, bool compound) |
| 1241 | { |
| 1242 | int nr = compound ? thp_nr_pages(page) : 1; |
| 1243 | |
| 1244 | VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); |
| 1245 | __SetPageSwapBacked(page); |
| 1246 | if (compound) { |
| 1247 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
| 1248 | /* increment count (starts at -1) */ |
| 1249 | atomic_set(compound_mapcount_ptr(page), 0); |
| 1250 | atomic_set(compound_pincount_ptr(page), 0); |
| 1251 | |
| 1252 | __mod_lruvec_page_state(page, NR_ANON_THPS, nr); |
| 1253 | } else { |
| 1254 | /* Anon THP always mapped first with PMD */ |
| 1255 | VM_BUG_ON_PAGE(PageTransCompound(page), page); |
| 1256 | /* increment count (starts at -1) */ |
| 1257 | atomic_set(&page->_mapcount, 0); |
| 1258 | } |
| 1259 | __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); |
| 1260 | __page_set_anon_rmap(page, vma, address, 1); |
| 1261 | } |
| 1262 | |
| 1263 | /** |
| 1264 | * page_add_file_rmap - add pte mapping to a file page |
| 1265 | * @page: the page to add the mapping to |
| 1266 | * @vma: the vm area in which the mapping is added |
| 1267 | * @compound: charge the page as compound or small page |
| 1268 | * |
| 1269 | * The caller needs to hold the pte lock. |
| 1270 | */ |
| 1271 | void page_add_file_rmap(struct page *page, |
| 1272 | struct vm_area_struct *vma, bool compound) |
| 1273 | { |
| 1274 | int i, nr = 0; |
| 1275 | |
| 1276 | VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); |
| 1277 | lock_page_memcg(page); |
| 1278 | if (compound && PageTransHuge(page)) { |
| 1279 | int nr_pages = thp_nr_pages(page); |
| 1280 | |
| 1281 | for (i = 0; i < nr_pages; i++) { |
| 1282 | if (atomic_inc_and_test(&page[i]._mapcount)) |
| 1283 | nr++; |
| 1284 | } |
| 1285 | if (!atomic_inc_and_test(compound_mapcount_ptr(page))) |
| 1286 | goto out; |
| 1287 | |
| 1288 | /* |
| 1289 | * It is racy to ClearPageDoubleMap in page_remove_file_rmap(); |
| 1290 | * but page lock is held by all page_add_file_rmap() compound |
| 1291 | * callers, and SetPageDoubleMap below warns if !PageLocked: |
| 1292 | * so here is a place that DoubleMap can be safely cleared. |
| 1293 | */ |
| 1294 | VM_WARN_ON_ONCE(!PageLocked(page)); |
| 1295 | if (nr == nr_pages && PageDoubleMap(page)) |
| 1296 | ClearPageDoubleMap(page); |
| 1297 | |
| 1298 | if (PageSwapBacked(page)) |
| 1299 | __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, |
| 1300 | nr_pages); |
| 1301 | else |
| 1302 | __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, |
| 1303 | nr_pages); |
| 1304 | } else { |
| 1305 | if (PageTransCompound(page) && page_mapping(page)) { |
| 1306 | VM_WARN_ON_ONCE(!PageLocked(page)); |
| 1307 | SetPageDoubleMap(compound_head(page)); |
| 1308 | } |
| 1309 | if (atomic_inc_and_test(&page->_mapcount)) |
| 1310 | nr++; |
| 1311 | } |
| 1312 | out: |
| 1313 | if (nr) |
| 1314 | __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); |
| 1315 | unlock_page_memcg(page); |
| 1316 | |
| 1317 | mlock_vma_page(page, vma, compound); |
| 1318 | } |
| 1319 | |
| 1320 | static void page_remove_file_rmap(struct page *page, bool compound) |
| 1321 | { |
| 1322 | int i, nr = 0; |
| 1323 | |
| 1324 | VM_BUG_ON_PAGE(compound && !PageHead(page), page); |
| 1325 | |
| 1326 | /* Hugepages are not counted in NR_FILE_MAPPED for now. */ |
| 1327 | if (unlikely(PageHuge(page))) { |
| 1328 | /* hugetlb pages are always mapped with pmds */ |
| 1329 | atomic_dec(compound_mapcount_ptr(page)); |
| 1330 | return; |
| 1331 | } |
| 1332 | |
| 1333 | /* page still mapped by someone else? */ |
| 1334 | if (compound && PageTransHuge(page)) { |
| 1335 | int nr_pages = thp_nr_pages(page); |
| 1336 | |
| 1337 | for (i = 0; i < nr_pages; i++) { |
| 1338 | if (atomic_add_negative(-1, &page[i]._mapcount)) |
| 1339 | nr++; |
| 1340 | } |
| 1341 | if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) |
| 1342 | goto out; |
| 1343 | if (PageSwapBacked(page)) |
| 1344 | __mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED, |
| 1345 | -nr_pages); |
| 1346 | else |
| 1347 | __mod_lruvec_page_state(page, NR_FILE_PMDMAPPED, |
| 1348 | -nr_pages); |
| 1349 | } else { |
| 1350 | if (atomic_add_negative(-1, &page->_mapcount)) |
| 1351 | nr++; |
| 1352 | } |
| 1353 | out: |
| 1354 | if (nr) |
| 1355 | __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); |
| 1356 | } |
| 1357 | |
| 1358 | static void page_remove_anon_compound_rmap(struct page *page) |
| 1359 | { |
| 1360 | int i, nr; |
| 1361 | |
| 1362 | if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) |
| 1363 | return; |
| 1364 | |
| 1365 | /* Hugepages are not counted in NR_ANON_PAGES for now. */ |
| 1366 | if (unlikely(PageHuge(page))) |
| 1367 | return; |
| 1368 | |
| 1369 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) |
| 1370 | return; |
| 1371 | |
| 1372 | __mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page)); |
| 1373 | |
| 1374 | if (TestClearPageDoubleMap(page)) { |
| 1375 | /* |
| 1376 | * Subpages can be mapped with PTEs too. Check how many of |
| 1377 | * them are still mapped. |
| 1378 | */ |
| 1379 | for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { |
| 1380 | if (atomic_add_negative(-1, &page[i]._mapcount)) |
| 1381 | nr++; |
| 1382 | } |
| 1383 | |
| 1384 | /* |
| 1385 | * Queue the page for deferred split if at least one small |
| 1386 | * page of the compound page is unmapped, but at least one |
| 1387 | * small page is still mapped. |
| 1388 | */ |
| 1389 | if (nr && nr < thp_nr_pages(page)) |
| 1390 | deferred_split_huge_page(page); |
| 1391 | } else { |
| 1392 | nr = thp_nr_pages(page); |
| 1393 | } |
| 1394 | |
| 1395 | if (nr) |
| 1396 | __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); |
| 1397 | } |
| 1398 | |
| 1399 | /** |
| 1400 | * page_remove_rmap - take down pte mapping from a page |
| 1401 | * @page: page to remove mapping from |
| 1402 | * @vma: the vm area from which the mapping is removed |
| 1403 | * @compound: uncharge the page as compound or small page |
| 1404 | * |
| 1405 | * The caller needs to hold the pte lock. |
| 1406 | */ |
| 1407 | void page_remove_rmap(struct page *page, |
| 1408 | struct vm_area_struct *vma, bool compound) |
| 1409 | { |
| 1410 | lock_page_memcg(page); |
| 1411 | |
| 1412 | if (!PageAnon(page)) { |
| 1413 | page_remove_file_rmap(page, compound); |
| 1414 | goto out; |
| 1415 | } |
| 1416 | |
| 1417 | if (compound) { |
| 1418 | page_remove_anon_compound_rmap(page); |
| 1419 | goto out; |
| 1420 | } |
| 1421 | |
| 1422 | /* page still mapped by someone else? */ |
| 1423 | if (!atomic_add_negative(-1, &page->_mapcount)) |
| 1424 | goto out; |
| 1425 | |
| 1426 | /* |
| 1427 | * We use the irq-unsafe __{inc|mod}_zone_page_stat because |
| 1428 | * these counters are not modified in interrupt context, and |
| 1429 | * pte lock (a spinlock) is held, which implies preemption disabled. |
| 1430 | */ |
| 1431 | __dec_lruvec_page_state(page, NR_ANON_MAPPED); |
| 1432 | |
| 1433 | if (PageTransCompound(page)) |
| 1434 | deferred_split_huge_page(compound_head(page)); |
| 1435 | |
| 1436 | /* |
| 1437 | * It would be tidy to reset the PageAnon mapping here, |
| 1438 | * but that might overwrite a racing page_add_anon_rmap |
| 1439 | * which increments mapcount after us but sets mapping |
| 1440 | * before us: so leave the reset to free_unref_page, |
| 1441 | * and remember that it's only reliable while mapped. |
| 1442 | * Leaving it set also helps swapoff to reinstate ptes |
| 1443 | * faster for those pages still in swapcache. |
| 1444 | */ |
| 1445 | out: |
| 1446 | unlock_page_memcg(page); |
| 1447 | |
| 1448 | munlock_vma_page(page, vma, compound); |
| 1449 | } |
| 1450 | |
| 1451 | /* |
| 1452 | * @arg: enum ttu_flags will be passed to this argument |
| 1453 | */ |
| 1454 | static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, |
| 1455 | unsigned long address, void *arg) |
| 1456 | { |
| 1457 | struct mm_struct *mm = vma->vm_mm; |
| 1458 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
| 1459 | pte_t pteval; |
| 1460 | struct page *subpage; |
| 1461 | bool ret = true; |
| 1462 | struct mmu_notifier_range range; |
| 1463 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
| 1464 | |
| 1465 | /* |
| 1466 | * When racing against e.g. zap_pte_range() on another cpu, |
| 1467 | * in between its ptep_get_and_clear_full() and page_remove_rmap(), |
| 1468 | * try_to_unmap() may return before page_mapped() has become false, |
| 1469 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
| 1470 | */ |
| 1471 | if (flags & TTU_SYNC) |
| 1472 | pvmw.flags = PVMW_SYNC; |
| 1473 | |
| 1474 | if (flags & TTU_SPLIT_HUGE_PMD) |
| 1475 | split_huge_pmd_address(vma, address, false, folio); |
| 1476 | |
| 1477 | /* |
| 1478 | * For THP, we have to assume the worst case, i.e. pmd, for invalidation. |
| 1479 | * For hugetlb, it could be much worse if we need to do pud |
| 1480 | * invalidation in the case of pmd sharing. |
| 1481 | * |
| 1482 | * Note that the folio can not be freed in this function as call of |
| 1483 | * try_to_unmap() must hold a reference on the folio. |
| 1484 | */ |
| 1485 | range.end = vma_address_end(&pvmw); |
| 1486 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, |
| 1487 | address, range.end); |
| 1488 | if (folio_test_hugetlb(folio)) { |
| 1489 | /* |
| 1490 | * If sharing is possible, start and end will be adjusted |
| 1491 | * accordingly. |
| 1492 | */ |
| 1493 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
| 1494 | &range.end); |
| 1495 | } |
| 1496 | mmu_notifier_invalidate_range_start(&range); |
| 1497 | |
| 1498 | while (page_vma_mapped_walk(&pvmw)) { |
| 1499 | /* Unexpected PMD-mapped THP? */ |
| 1500 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
| 1501 | |
| 1502 | /* |
| 1503 | * If the folio is in an mlock()d vma, we must not swap it out. |
| 1504 | */ |
| 1505 | if (!(flags & TTU_IGNORE_MLOCK) && |
| 1506 | (vma->vm_flags & VM_LOCKED)) { |
| 1507 | /* Restore the mlock which got missed */ |
| 1508 | mlock_vma_folio(folio, vma, false); |
| 1509 | page_vma_mapped_walk_done(&pvmw); |
| 1510 | ret = false; |
| 1511 | break; |
| 1512 | } |
| 1513 | |
| 1514 | subpage = folio_page(folio, |
| 1515 | pte_pfn(*pvmw.pte) - folio_pfn(folio)); |
| 1516 | address = pvmw.address; |
| 1517 | |
| 1518 | if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { |
| 1519 | /* |
| 1520 | * To call huge_pmd_unshare, i_mmap_rwsem must be |
| 1521 | * held in write mode. Caller needs to explicitly |
| 1522 | * do this outside rmap routines. |
| 1523 | */ |
| 1524 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); |
| 1525 | if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { |
| 1526 | /* |
| 1527 | * huge_pmd_unshare unmapped an entire PMD |
| 1528 | * page. There is no way of knowing exactly |
| 1529 | * which PMDs may be cached for this mm, so |
| 1530 | * we must flush them all. start/end were |
| 1531 | * already adjusted above to cover this range. |
| 1532 | */ |
| 1533 | flush_cache_range(vma, range.start, range.end); |
| 1534 | flush_tlb_range(vma, range.start, range.end); |
| 1535 | mmu_notifier_invalidate_range(mm, range.start, |
| 1536 | range.end); |
| 1537 | |
| 1538 | /* |
| 1539 | * The ref count of the PMD page was dropped |
| 1540 | * which is part of the way map counting |
| 1541 | * is done for shared PMDs. Return 'true' |
| 1542 | * here. When there is no other sharing, |
| 1543 | * huge_pmd_unshare returns false and we will |
| 1544 | * unmap the actual page and drop map count |
| 1545 | * to zero. |
| 1546 | */ |
| 1547 | page_vma_mapped_walk_done(&pvmw); |
| 1548 | break; |
| 1549 | } |
| 1550 | } |
| 1551 | |
| 1552 | /* Nuke the page table entry. */ |
| 1553 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
| 1554 | if (should_defer_flush(mm, flags)) { |
| 1555 | /* |
| 1556 | * We clear the PTE but do not flush so potentially |
| 1557 | * a remote CPU could still be writing to the folio. |
| 1558 | * If the entry was previously clean then the |
| 1559 | * architecture must guarantee that a clear->dirty |
| 1560 | * transition on a cached TLB entry is written through |
| 1561 | * and traps if the PTE is unmapped. |
| 1562 | */ |
| 1563 | pteval = ptep_get_and_clear(mm, address, pvmw.pte); |
| 1564 | |
| 1565 | set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); |
| 1566 | } else { |
| 1567 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
| 1568 | } |
| 1569 | |
| 1570 | /* Set the dirty flag on the folio now the pte is gone. */ |
| 1571 | if (pte_dirty(pteval)) |
| 1572 | folio_mark_dirty(folio); |
| 1573 | |
| 1574 | /* Update high watermark before we lower rss */ |
| 1575 | update_hiwater_rss(mm); |
| 1576 | |
| 1577 | if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) { |
| 1578 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
| 1579 | if (folio_test_hugetlb(folio)) { |
| 1580 | hugetlb_count_sub(folio_nr_pages(folio), mm); |
| 1581 | set_huge_swap_pte_at(mm, address, |
| 1582 | pvmw.pte, pteval, |
| 1583 | vma_mmu_pagesize(vma)); |
| 1584 | } else { |
| 1585 | dec_mm_counter(mm, mm_counter(&folio->page)); |
| 1586 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1587 | } |
| 1588 | |
| 1589 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
| 1590 | /* |
| 1591 | * The guest indicated that the page content is of no |
| 1592 | * interest anymore. Simply discard the pte, vmscan |
| 1593 | * will take care of the rest. |
| 1594 | * A future reference will then fault in a new zero |
| 1595 | * page. When userfaultfd is active, we must not drop |
| 1596 | * this page though, as its main user (postcopy |
| 1597 | * migration) will not expect userfaults on already |
| 1598 | * copied pages. |
| 1599 | */ |
| 1600 | dec_mm_counter(mm, mm_counter(&folio->page)); |
| 1601 | /* We have to invalidate as we cleared the pte */ |
| 1602 | mmu_notifier_invalidate_range(mm, address, |
| 1603 | address + PAGE_SIZE); |
| 1604 | } else if (folio_test_anon(folio)) { |
| 1605 | swp_entry_t entry = { .val = page_private(subpage) }; |
| 1606 | pte_t swp_pte; |
| 1607 | /* |
| 1608 | * Store the swap location in the pte. |
| 1609 | * See handle_pte_fault() ... |
| 1610 | */ |
| 1611 | if (unlikely(folio_test_swapbacked(folio) != |
| 1612 | folio_test_swapcache(folio))) { |
| 1613 | WARN_ON_ONCE(1); |
| 1614 | ret = false; |
| 1615 | /* We have to invalidate as we cleared the pte */ |
| 1616 | mmu_notifier_invalidate_range(mm, address, |
| 1617 | address + PAGE_SIZE); |
| 1618 | page_vma_mapped_walk_done(&pvmw); |
| 1619 | break; |
| 1620 | } |
| 1621 | |
| 1622 | /* MADV_FREE page check */ |
| 1623 | if (!folio_test_swapbacked(folio)) { |
| 1624 | int ref_count, map_count; |
| 1625 | |
| 1626 | /* |
| 1627 | * Synchronize with gup_pte_range(): |
| 1628 | * - clear PTE; barrier; read refcount |
| 1629 | * - inc refcount; barrier; read PTE |
| 1630 | */ |
| 1631 | smp_mb(); |
| 1632 | |
| 1633 | ref_count = folio_ref_count(folio); |
| 1634 | map_count = folio_mapcount(folio); |
| 1635 | |
| 1636 | /* |
| 1637 | * Order reads for page refcount and dirty flag |
| 1638 | * (see comments in __remove_mapping()). |
| 1639 | */ |
| 1640 | smp_rmb(); |
| 1641 | |
| 1642 | /* |
| 1643 | * The only page refs must be one from isolation |
| 1644 | * plus the rmap(s) (dropped by discard:). |
| 1645 | */ |
| 1646 | if (ref_count == 1 + map_count && |
| 1647 | !folio_test_dirty(folio)) { |
| 1648 | /* Invalidate as we cleared the pte */ |
| 1649 | mmu_notifier_invalidate_range(mm, |
| 1650 | address, address + PAGE_SIZE); |
| 1651 | dec_mm_counter(mm, MM_ANONPAGES); |
| 1652 | goto discard; |
| 1653 | } |
| 1654 | |
| 1655 | /* |
| 1656 | * If the folio was redirtied, it cannot be |
| 1657 | * discarded. Remap the page to page table. |
| 1658 | */ |
| 1659 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1660 | folio_set_swapbacked(folio); |
| 1661 | ret = false; |
| 1662 | page_vma_mapped_walk_done(&pvmw); |
| 1663 | break; |
| 1664 | } |
| 1665 | |
| 1666 | if (swap_duplicate(entry) < 0) { |
| 1667 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1668 | ret = false; |
| 1669 | page_vma_mapped_walk_done(&pvmw); |
| 1670 | break; |
| 1671 | } |
| 1672 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { |
| 1673 | swap_free(entry); |
| 1674 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1675 | ret = false; |
| 1676 | page_vma_mapped_walk_done(&pvmw); |
| 1677 | break; |
| 1678 | } |
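|  | /*
|  | * Put this mm on the global mmlist so that swapoff can later find
|  | * its swap entries; recheck under mmlist_lock so that two racing
|  | * threads cannot add it twice.
|  | */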
| 1679 | if (list_empty(&mm->mmlist)) { |
| 1680 | spin_lock(&mmlist_lock); |
| 1681 | if (list_empty(&mm->mmlist)) |
| 1682 | list_add(&mm->mmlist, &init_mm.mmlist); |
| 1683 | spin_unlock(&mmlist_lock); |
| 1684 | } |
| 1685 | dec_mm_counter(mm, MM_ANONPAGES); |
| 1686 | inc_mm_counter(mm, MM_SWAPENTS); |
| 1687 | swp_pte = swp_entry_to_pte(entry); |
| 1688 | if (pte_soft_dirty(pteval)) |
| 1689 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 1690 | if (pte_uffd_wp(pteval)) |
| 1691 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
| 1692 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
| 1693 | /* Invalidate as we cleared the pte */ |
| 1694 | mmu_notifier_invalidate_range(mm, address, |
| 1695 | address + PAGE_SIZE); |
| 1696 | } else { |
| 1697 | /* |
| 1698 | * This is a locked file-backed folio, |
| 1699 | * so it cannot be removed from the page |
| 1700 | * cache and replaced by a new folio before |
| 1701 | * mmu_notifier_invalidate_range_end, so no |
| 1702 | * concurrent thread might update its page table |
| 1703 | * to point at a new folio while a device is |
| 1704 | * still using this folio. |
| 1705 | * |
| 1706 | * See Documentation/vm/mmu_notifier.rst |
| 1707 | */ |
| 1708 | dec_mm_counter(mm, mm_counter_file(&folio->page)); |
| 1709 | } |
| 1710 | discard: |
| 1711 | /* |
| 1712 | * No need to call mmu_notifier_invalidate_range(); it has been
| 1713 | * done above for all cases requiring it to happen under the page
| 1714 | * table lock, before mmu_notifier_invalidate_range_end().
| 1715 | * |
| 1716 | * See Documentation/vm/mmu_notifier.rst |
| 1717 | */ |
| 1718 | page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); |
| 1719 | if (vma->vm_flags & VM_LOCKED) |
| 1720 | mlock_page_drain_local(); |
| 1721 | folio_put(folio); |
| 1722 | } |
| 1723 | |
| 1724 | mmu_notifier_invalidate_range_end(&range); |
| 1725 | |
| 1726 | return ret; |
| 1727 | } |
| 1728 | |
| 1729 | static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) |
| 1730 | { |
| 1731 | return vma_is_temporary_stack(vma); |
| 1732 | } |
| 1733 | |
| 1734 | static int page_not_mapped(struct folio *folio) |
| 1735 | { |
| 1736 | return !folio_mapped(folio); |
| 1737 | } |
| 1738 | |
| 1739 | /** |
| 1740 | * try_to_unmap - Try to remove all page table mappings to a folio. |
| 1741 | * @folio: The folio to unmap. |
| 1742 | * @flags: action and flags |
| 1743 | * |
| 1744 | * Tries to remove all the page table entries which are mapping this |
| 1745 | * folio. It is the caller's responsibility to check if the folio is |
| 1746 | * still mapped if needed (use TTU_SYNC to prevent accounting races). |
| 1747 | * |
| 1748 | * Context: Caller must hold the folio lock. |
| 1749 | */ |
| 1750 | void try_to_unmap(struct folio *folio, enum ttu_flags flags) |
| 1751 | { |
| 1752 | struct rmap_walk_control rwc = { |
| 1753 | .rmap_one = try_to_unmap_one, |
| 1754 | .arg = (void *)flags, |
| 1755 | .done = page_not_mapped, |
| 1756 | .anon_lock = folio_lock_anon_vma_read, |
| 1757 | }; |
| 1758 | |
| 1759 | if (flags & TTU_RMAP_LOCKED) |
| 1760 | rmap_walk_locked(folio, &rwc); |
| 1761 | else |
| 1762 | rmap_walk(folio, &rwc); |
| 1763 | } |
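|  | 
|  | /*
|  | * Illustrative caller sketch (hypothetical, not taken from this file):
|  | * a reclaim-style user typically locks and isolates the folio, asks for
|  | * all of its mappings to be removed, and only proceeds once nothing maps
|  | * it any more.  The flag choice below is only an example.
|  | *
|  | *	folio_lock(folio);
|  | *	try_to_unmap(folio, TTU_IGNORE_MLOCK | TTU_SYNC);
|  | *	if (!folio_mapped(folio))
|  | *		... the folio may be reclaimed once its refcount allows ...
|  | *	folio_unlock(folio);
|  | */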
| 1764 | |
| 1765 | /* |
| 1766 | * @arg: enum ttu_flags will be passed to this argument. |
| 1767 | * |
| 1768 | * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs |
| 1769 | * containing migration entries. |
| 1770 | */ |
| 1771 | static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, |
| 1772 | unsigned long address, void *arg) |
| 1773 | { |
| 1774 | struct mm_struct *mm = vma->vm_mm; |
| 1775 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
| 1776 | pte_t pteval; |
| 1777 | struct page *subpage; |
| 1778 | bool ret = true; |
| 1779 | struct mmu_notifier_range range; |
| 1780 | enum ttu_flags flags = (enum ttu_flags)(long)arg; |
| 1781 | |
| 1782 | /* |
| 1783 | * When racing against e.g. zap_pte_range() on another cpu, |
| 1784 | * in between its ptep_get_and_clear_full() and page_remove_rmap(), |
| 1785 | * try_to_migrate() may return before page_mapped() has become false, |
| 1786 | * if page table locking is skipped: use TTU_SYNC to wait for that. |
| 1787 | */ |
| 1788 | if (flags & TTU_SYNC) |
| 1789 | pvmw.flags = PVMW_SYNC; |
| 1790 | |
| 1791 | /* |
| 1792 | * unmap_page() in mm/huge_memory.c is the only user of migration with |
| 1793 | * TTU_SPLIT_HUGE_PMD and it wants to freeze. |
| 1794 | */ |
| 1795 | if (flags & TTU_SPLIT_HUGE_PMD) |
| 1796 | split_huge_pmd_address(vma, address, true, folio); |
| 1797 | |
| 1798 | /* |
| 1799 | * For THP, we have to assume the worst case, i.e. pmd-level invalidation.
| 1800 | * For hugetlb, it could be much worse if we need to do pud |
| 1801 | * invalidation in the case of pmd sharing. |
| 1802 | * |
| 1803 | * Note that the folio cannot be freed in this function, as the caller
| 1804 | * of try_to_migrate() must hold a reference on the folio.
| 1805 | */ |
| 1806 | range.end = vma_address_end(&pvmw); |
| 1807 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, |
| 1808 | address, range.end); |
| 1809 | if (folio_test_hugetlb(folio)) { |
| 1810 | /* |
| 1811 | * If sharing is possible, start and end will be adjusted |
| 1812 | * accordingly. |
| 1813 | */ |
| 1814 | adjust_range_if_pmd_sharing_possible(vma, &range.start, |
| 1815 | &range.end); |
| 1816 | } |
| 1817 | mmu_notifier_invalidate_range_start(&range); |
| 1818 | |
| 1819 | while (page_vma_mapped_walk(&pvmw)) { |
| 1820 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
| 1821 | /* PMD-mapped THP migration entry */ |
| 1822 | if (!pvmw.pte) { |
| 1823 | subpage = folio_page(folio, |
| 1824 | pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); |
| 1825 | VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || |
| 1826 | !folio_test_pmd_mappable(folio), folio); |
| 1827 | |
| 1828 | set_pmd_migration_entry(&pvmw, subpage); |
| 1829 | continue; |
| 1830 | } |
| 1831 | #endif |
| 1832 | |
| 1833 | /* Unexpected PMD-mapped THP? */ |
| 1834 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
| 1835 | |
| 1836 | subpage = folio_page(folio, |
| 1837 | pte_pfn(*pvmw.pte) - folio_pfn(folio)); |
| 1838 | address = pvmw.address; |
| 1839 | |
| 1840 | if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { |
| 1841 | /* |
| 1842 | * To call huge_pmd_unshare, i_mmap_rwsem must be |
| 1843 | * held in write mode. Caller needs to explicitly |
| 1844 | * do this outside rmap routines. |
| 1845 | */ |
| 1846 | VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); |
| 1847 | if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) { |
| 1848 | /* |
| 1849 | * huge_pmd_unshare unmapped an entire PMD |
| 1850 | * page. There is no way of knowing exactly |
| 1851 | * which PMDs may be cached for this mm, so |
| 1852 | * we must flush them all. start/end were |
| 1853 | * already adjusted above to cover this range. |
| 1854 | */ |
| 1855 | flush_cache_range(vma, range.start, range.end); |
| 1856 | flush_tlb_range(vma, range.start, range.end); |
| 1857 | mmu_notifier_invalidate_range(mm, range.start, |
| 1858 | range.end); |
| 1859 | |
| 1860 | /* |
| 1861 | * The ref count of the PMD page was dropped |
| 1862 | * which is part of the way map counting |
| 1863 | * is done for shared PMDs. Return 'true' |
| 1864 | * here. When there is no other sharing, |
| 1865 | * huge_pmd_unshare returns false and we will |
| 1866 | * unmap the actual page and drop map count |
| 1867 | * to zero. |
| 1868 | */ |
| 1869 | page_vma_mapped_walk_done(&pvmw); |
| 1870 | break; |
| 1871 | } |
| 1872 | } |
| 1873 | |
| 1874 | /* Nuke the page table entry. */ |
| 1875 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
| 1876 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
| 1877 | |
| 1878 | /* Set the dirty flag on the folio now the pte is gone. */ |
| 1879 | if (pte_dirty(pteval)) |
| 1880 | folio_mark_dirty(folio); |
| 1881 | |
| 1882 | /* Update high watermark before we lower rss */ |
| 1883 | update_hiwater_rss(mm); |
| 1884 | |
| 1885 | if (folio_is_zone_device(folio)) { |
| 1886 | unsigned long pfn = folio_pfn(folio); |
| 1887 | swp_entry_t entry; |
| 1888 | pte_t swp_pte; |
| 1889 | |
| 1890 | /* |
| 1891 | * Store the pfn of the page in a special migration |
| 1892 | * pte. do_swap_page() will wait until the migration |
| 1893 | * pte is removed and then restart fault handling. |
| 1894 | */ |
| 1895 | entry = pte_to_swp_entry(pteval); |
| 1896 | if (is_writable_device_private_entry(entry)) |
| 1897 | entry = make_writable_migration_entry(pfn); |
| 1898 | else |
| 1899 | entry = make_readable_migration_entry(pfn); |
| 1900 | swp_pte = swp_entry_to_pte(entry); |
| 1901 | |
| 1902 | /* |
| 1903 | * pteval maps a zone device page and is therefore |
| 1904 | * a swap pte. |
| 1905 | */ |
| 1906 | if (pte_swp_soft_dirty(pteval)) |
| 1907 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 1908 | if (pte_swp_uffd_wp(pteval)) |
| 1909 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
| 1910 | set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); |
| 1911 | trace_set_migration_pte(pvmw.address, pte_val(swp_pte), |
| 1912 | compound_order(&folio->page)); |
| 1913 | /* |
| 1914 | * No need to invalidate here, it will be synchronized
| 1915 | * against the special swap migration pte.
| 1916 | * |
| 1917 | * The assignment to subpage above was computed from a |
| 1918 | * swap PTE which results in an invalid pointer. |
| 1919 | * Since only PAGE_SIZE pages can currently be |
| 1920 | * migrated, just set it to page. This will need to be |
| 1921 | * changed when hugepage migrations to device private |
| 1922 | * memory are supported. |
| 1923 | */ |
| 1924 | subpage = &folio->page; |
| 1925 | } else if (PageHWPoison(subpage)) { |
| 1926 | pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); |
| 1927 | if (folio_test_hugetlb(folio)) { |
| 1928 | hugetlb_count_sub(folio_nr_pages(folio), mm); |
| 1929 | set_huge_swap_pte_at(mm, address, |
| 1930 | pvmw.pte, pteval, |
| 1931 | vma_mmu_pagesize(vma)); |
| 1932 | } else { |
| 1933 | dec_mm_counter(mm, mm_counter(&folio->page)); |
| 1934 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1935 | } |
| 1936 | |
| 1937 | } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { |
| 1938 | /* |
| 1939 | * The guest indicated that the page content is of no |
| 1940 | * interest anymore. Simply discard the pte, vmscan |
| 1941 | * will take care of the rest. |
| 1942 | * A future reference will then fault in a new zero |
| 1943 | * page. When userfaultfd is active, we must not drop |
| 1944 | * this page though, as its main user (postcopy |
| 1945 | * migration) will not expect userfaults on already |
| 1946 | * copied pages. |
| 1947 | */ |
| 1948 | dec_mm_counter(mm, mm_counter(&folio->page)); |
| 1949 | /* We have to invalidate as we cleared the pte */ |
| 1950 | mmu_notifier_invalidate_range(mm, address, |
| 1951 | address + PAGE_SIZE); |
| 1952 | } else { |
| 1953 | swp_entry_t entry; |
| 1954 | pte_t swp_pte; |
| 1955 | |
| 1956 | if (arch_unmap_one(mm, vma, address, pteval) < 0) { |
| 1957 | set_pte_at(mm, address, pvmw.pte, pteval); |
| 1958 | ret = false; |
| 1959 | page_vma_mapped_walk_done(&pvmw); |
| 1960 | break; |
| 1961 | } |
| 1962 | |
| 1963 | /* |
| 1964 | * Store the pfn of the page in a special migration |
| 1965 | * pte. do_swap_page() will wait until the migration |
| 1966 | * pte is removed and then restart fault handling. |
| 1967 | */ |
| 1968 | if (pte_write(pteval)) |
| 1969 | entry = make_writable_migration_entry( |
| 1970 | page_to_pfn(subpage)); |
| 1971 | else |
| 1972 | entry = make_readable_migration_entry( |
| 1973 | page_to_pfn(subpage)); |
| 1974 | |
| 1975 | swp_pte = swp_entry_to_pte(entry); |
| 1976 | if (pte_soft_dirty(pteval)) |
| 1977 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 1978 | if (pte_uffd_wp(pteval)) |
| 1979 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
| 1980 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
| 1981 | trace_set_migration_pte(address, pte_val(swp_pte), |
| 1982 | compound_order(&folio->page)); |
| 1983 | /* |
| 1984 | * No need to invalidate here, it will be synchronized
| 1985 | * against the special swap migration pte.
| 1986 | */ |
| 1987 | } |
| 1988 | |
| 1989 | /* |
| 1990 | * No need to call mmu_notifier_invalidate_range(); it has been
| 1991 | * done above for all cases requiring it to happen under the page
| 1992 | * table lock, before mmu_notifier_invalidate_range_end().
| 1993 | * |
| 1994 | * See Documentation/vm/mmu_notifier.rst |
| 1995 | */ |
| 1996 | page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); |
| 1997 | if (vma->vm_flags & VM_LOCKED) |
| 1998 | mlock_page_drain_local(); |
| 1999 | folio_put(folio); |
| 2000 | } |
| 2001 | |
| 2002 | mmu_notifier_invalidate_range_end(&range); |
| 2003 | |
| 2004 | return ret; |
| 2005 | } |
| 2006 | |
| 2007 | /** |
| 2008 | * try_to_migrate - try to replace all page table mappings with swap entries |
| 2009 | * @folio: the folio to replace page table entries for |
| 2010 | * @flags: action and flags |
| 2011 | * |
| 2012 | * Tries to remove all the page table entries which are mapping this folio and |
| 2013 | * replace them with special swap entries. Caller must hold the folio lock. |
| 2014 | */ |
| 2015 | void try_to_migrate(struct folio *folio, enum ttu_flags flags) |
| 2016 | { |
| 2017 | struct rmap_walk_control rwc = { |
| 2018 | .rmap_one = try_to_migrate_one, |
| 2019 | .arg = (void *)flags, |
| 2020 | .done = page_not_mapped, |
| 2021 | .anon_lock = folio_lock_anon_vma_read, |
| 2022 | }; |
| 2023 | |
| 2024 | /* |
| 2025 | * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
| 2026 | * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
| 2027 | */ |
| 2028 | if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | |
| 2029 | TTU_SYNC))) |
| 2030 | return; |
| 2031 | |
| 2032 | if (folio_is_zone_device(folio) && !folio_is_device_private(folio)) |
| 2033 | return; |
| 2034 | |
| 2035 | /* |
| 2036 | * During exec, a temporary VMA is set up and later moved.
| 2037 | * The VMA is moved under the anon_vma lock but not the |
| 2038 | * page tables leading to a race where migration cannot |
| 2039 | * find the migration ptes. Rather than increasing the |
| 2040 | * locking requirements of exec(), migration skips |
| 2041 | * temporary VMAs until after exec() completes. |
| 2042 | */ |
| 2043 | if (!folio_test_ksm(folio) && folio_test_anon(folio)) |
| 2044 | rwc.invalid_vma = invalid_migration_vma; |
| 2045 | |
| 2046 | if (flags & TTU_RMAP_LOCKED) |
| 2047 | rmap_walk_locked(folio, &rwc); |
| 2048 | else |
| 2049 | rmap_walk(folio, &rwc); |
| 2050 | } |
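|  | 
|  | /*
|  | * Illustrative caller sketch (hypothetical, not taken from this file):
|  | * the migration path typically locks the source folio, replaces its
|  | * mappings with migration entries, and verifies that nothing maps it
|  | * before copying the contents and restoring the ptes with
|  | * remove_migration_ptes().
|  | *
|  | *	folio_lock(src);
|  | *	try_to_migrate(src, 0);
|  | *	if (!folio_mapped(src))
|  | *		... copy src to dst, then remove the migration entries ...
|  | *	folio_unlock(src);
|  | */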
| 2051 | |
| 2052 | #ifdef CONFIG_DEVICE_PRIVATE |
| 2053 | struct make_exclusive_args { |
| 2054 | struct mm_struct *mm; |
| 2055 | unsigned long address; |
| 2056 | void *owner; |
| 2057 | bool valid; |
| 2058 | }; |
| 2059 | |
| 2060 | static bool page_make_device_exclusive_one(struct folio *folio, |
| 2061 | struct vm_area_struct *vma, unsigned long address, void *priv) |
| 2062 | { |
| 2063 | struct mm_struct *mm = vma->vm_mm; |
| 2064 | DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); |
| 2065 | struct make_exclusive_args *args = priv; |
| 2066 | pte_t pteval; |
| 2067 | struct page *subpage; |
| 2068 | bool ret = true; |
| 2069 | struct mmu_notifier_range range; |
| 2070 | swp_entry_t entry; |
| 2071 | pte_t swp_pte; |
| 2072 | |
| 2073 | mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, |
| 2074 | vma->vm_mm, address, min(vma->vm_end, |
| 2075 | address + folio_size(folio)), |
| 2076 | args->owner); |
| 2077 | mmu_notifier_invalidate_range_start(&range); |
| 2078 | |
| 2079 | while (page_vma_mapped_walk(&pvmw)) { |
| 2080 | /* Unexpected PMD-mapped THP? */ |
| 2081 | VM_BUG_ON_FOLIO(!pvmw.pte, folio); |
| 2082 | |
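|  | /*
|  | * Only a present pte can be replaced with a device exclusive entry;
|  | * if the pte has already changed under us (e.g. to a swap or
|  | * migration entry), give up on this mapping.
|  | */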
| 2083 | if (!pte_present(*pvmw.pte)) { |
| 2084 | ret = false; |
| 2085 | page_vma_mapped_walk_done(&pvmw); |
| 2086 | break; |
| 2087 | } |
| 2088 | |
| 2089 | subpage = folio_page(folio, |
| 2090 | pte_pfn(*pvmw.pte) - folio_pfn(folio)); |
| 2091 | address = pvmw.address; |
| 2092 | |
| 2093 | /* Nuke the page table entry. */ |
| 2094 | flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); |
| 2095 | pteval = ptep_clear_flush(vma, address, pvmw.pte); |
| 2096 | |
| 2097 | /* Set the dirty flag on the folio now the pte is gone. */ |
| 2098 | if (pte_dirty(pteval)) |
| 2099 | folio_mark_dirty(folio); |
| 2100 | |
| 2101 | /* |
| 2102 | * Check that our target page is still mapped at the expected |
| 2103 | * address. |
| 2104 | */ |
| 2105 | if (args->mm == mm && args->address == address && |
| 2106 | pte_write(pteval)) |
| 2107 | args->valid = true; |
| 2108 | |
| 2109 | /* |
| 2110 | * Store the pfn of the page in a special migration |
| 2111 | * pte. do_swap_page() will wait until the migration |
| 2112 | * pte is removed and then restart fault handling. |
| 2113 | */ |
| 2114 | if (pte_write(pteval)) |
| 2115 | entry = make_writable_device_exclusive_entry( |
| 2116 | page_to_pfn(subpage)); |
| 2117 | else |
| 2118 | entry = make_readable_device_exclusive_entry( |
| 2119 | page_to_pfn(subpage)); |
| 2120 | swp_pte = swp_entry_to_pte(entry); |
| 2121 | if (pte_soft_dirty(pteval)) |
| 2122 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
| 2123 | if (pte_uffd_wp(pteval)) |
| 2124 | swp_pte = pte_swp_mkuffd_wp(swp_pte); |
| 2125 | |
| 2126 | set_pte_at(mm, address, pvmw.pte, swp_pte); |
| 2127 | |
| 2128 | /* |
| 2129 | * There is a reference on the page for the swap entry which has
| 2130 | * been removed, so we shouldn't take another.
| 2131 | */ |
| 2132 | page_remove_rmap(subpage, vma, false); |
| 2133 | } |
| 2134 | |
| 2135 | mmu_notifier_invalidate_range_end(&range); |
| 2136 | |
| 2137 | return ret; |
| 2138 | } |
| 2139 | |
| 2140 | /** |
| 2141 | * folio_make_device_exclusive - Mark the folio exclusively owned by a device. |
| 2142 | * @folio: The folio to replace page table entries for. |
| 2143 | * @mm: The mm_struct where the folio is expected to be mapped. |
| 2144 | * @address: Address where the folio is expected to be mapped. |
| 2145 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks |
| 2146 | * |
| 2147 | * Tries to remove all the page table entries which are mapping this |
| 2148 | * folio and replace them with special device exclusive swap entries to |
| 2149 | * grant a device exclusive access to the folio. |
| 2150 | * |
| 2151 | * Context: Caller must hold the folio lock. |
| 2152 | * Return: false if the page is still mapped, or if it could not be unmapped |
| 2153 | * from the expected address. Otherwise returns true (success). |
| 2154 | */ |
| 2155 | static bool folio_make_device_exclusive(struct folio *folio, |
| 2156 | struct mm_struct *mm, unsigned long address, void *owner) |
| 2157 | { |
| 2158 | struct make_exclusive_args args = { |
| 2159 | .mm = mm, |
| 2160 | .address = address, |
| 2161 | .owner = owner, |
| 2162 | .valid = false, |
| 2163 | }; |
| 2164 | struct rmap_walk_control rwc = { |
| 2165 | .rmap_one = page_make_device_exclusive_one, |
| 2166 | .done = page_not_mapped, |
| 2167 | .anon_lock = folio_lock_anon_vma_read, |
| 2168 | .arg = &args, |
| 2169 | }; |
| 2170 | |
| 2171 | /* |
| 2172 | * Restrict to anonymous folios for now to avoid potential writeback |
| 2173 | * issues. |
| 2174 | */ |
| 2175 | if (!folio_test_anon(folio)) |
| 2176 | return false; |
| 2177 | |
| 2178 | rmap_walk(folio, &rwc); |
| 2179 | |
| 2180 | return args.valid && !folio_mapcount(folio); |
| 2181 | } |
| 2182 | |
| 2183 | /** |
| 2184 | * make_device_exclusive_range() - Mark a range for exclusive use by a device |
| 2185 | * @mm: mm_struct of the associated target process
| 2186 | * @start: start of the region to mark for exclusive device access |
| 2187 | * @end: end address of region |
| 2188 | * @pages: returns the pages which were successfully marked for exclusive access |
| 2189 | * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering |
| 2190 | * |
| 2191 | * Returns: number of pages found in the range by GUP. A page is marked for |
| 2192 | * exclusive access only if the page pointer is non-NULL. |
| 2193 | * |
| 2194 | * This function finds the ptes mapping page(s) in the given address range,
| 2195 | * locks them and replaces the mappings with special swap entries that prevent
| 2196 | * userspace CPU access. On fault these entries are replaced with the original
| 2197 | * mapping after calling MMU notifiers.
| 2198 | * |
| 2199 | * A driver using this to program access from a device must use an mmu notifier
| 2200 | * critical section to hold a device-specific lock during programming. Once
| 2201 | * programming is complete it should drop the page lock and reference, after
| 2202 | * which point CPU access to the page will revoke the exclusive access.
| 2203 | */ |
| 2204 | int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, |
| 2205 | unsigned long end, struct page **pages, |
| 2206 | void *owner) |
| 2207 | { |
| 2208 | long npages = (end - start) >> PAGE_SHIFT; |
| 2209 | long i; |
| 2210 | |
| 2211 | npages = get_user_pages_remote(mm, start, npages, |
| 2212 | FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, |
| 2213 | pages, NULL, NULL); |
| 2214 | if (npages < 0) |
| 2215 | return npages; |
| 2216 | |
| 2217 | for (i = 0; i < npages; i++, start += PAGE_SIZE) { |
| 2218 | struct folio *folio = page_folio(pages[i]); |
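|  | /*
|  | * Skip tail pages of compound folios and folios we cannot lock:
|  | * they cannot be made exclusive here, so drop the GUP reference
|  | * and report them back as NULL.
|  | */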
| 2219 | if (PageTail(pages[i]) || !folio_trylock(folio)) { |
| 2220 | folio_put(folio); |
| 2221 | pages[i] = NULL; |
| 2222 | continue; |
| 2223 | } |
| 2224 | |
| 2225 | if (!folio_make_device_exclusive(folio, mm, start, owner)) { |
| 2226 | folio_unlock(folio); |
| 2227 | folio_put(folio); |
| 2228 | pages[i] = NULL; |
| 2229 | } |
| 2230 | } |
| 2231 | |
| 2232 | return npages; |
| 2233 | } |
| 2234 | EXPORT_SYMBOL_GPL(make_device_exclusive_range); |
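|  | 
|  | /*
|  | * Illustrative driver-side sketch (hypothetical, not taken from this
|  | * file): after marking the range, a driver programs its device mappings
|  | * while an MMU notifier callback filtering on @owner would make it back
|  | * off, then drops the lock and reference on each page it was given.
|  | * NULL entries were not made exclusive and may be retried.
|  | *
|  | *	npages = make_device_exclusive_range(mm, start, end, pages, owner);
|  | *	for (i = 0; i < npages; i++) {
|  | *		if (!pages[i])
|  | *			continue;
|  | *		... program device access under the driver's notifier lock ...
|  | *		unlock_page(pages[i]);
|  | *		put_page(pages[i]);
|  | *	}
|  | */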
| 2235 | #endif |
| 2236 | |
| 2237 | void __put_anon_vma(struct anon_vma *anon_vma) |
| 2238 | { |
| 2239 | struct anon_vma *root = anon_vma->root; |
| 2240 | |
| 2241 | anon_vma_free(anon_vma); |
| 2242 | if (root != anon_vma && atomic_dec_and_test(&root->refcount)) |
| 2243 | anon_vma_free(root); |
| 2244 | } |
| 2245 | |
| 2246 | static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, |
| 2247 | const struct rmap_walk_control *rwc) |
| 2248 | { |
| 2249 | struct anon_vma *anon_vma; |
| 2250 | |
| 2251 | if (rwc->anon_lock) |
| 2252 | return rwc->anon_lock(folio); |
| 2253 | |
| 2254 | /* |
| 2255 | * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() |
| 2256 | * because that depends on page_mapped(); but not all its usages |
| 2257 | * are holding mmap_lock. Users without mmap_lock are required to |
| 2258 | * take a reference count to prevent the anon_vma from disappearing
| 2259 | */ |
| 2260 | anon_vma = folio_anon_vma(folio); |
| 2261 | if (!anon_vma) |
| 2262 | return NULL; |
| 2263 | |
| 2264 | anon_vma_lock_read(anon_vma); |
| 2265 | return anon_vma; |
| 2266 | } |
| 2267 | |
| 2268 | /* |
| 2269 | * rmap_walk_anon - do something to an anonymous folio using the object-based
| 2270 | * rmap method
| 2271 | * @folio: the folio to be handled
| 2272 | * @rwc: control variable according to each walk type
| 2273 | * @locked: caller already holds the relevant rmap lock
| 2274 | * Find all the mappings of a folio using the mapping pointer and the vma
| 2275 | * chains contained in the anon_vma struct it points to.
| 2276 | */ |
| 2277 | static void rmap_walk_anon(struct folio *folio, |
| 2278 | const struct rmap_walk_control *rwc, bool locked) |
| 2279 | { |
| 2280 | struct anon_vma *anon_vma; |
| 2281 | pgoff_t pgoff_start, pgoff_end; |
| 2282 | struct anon_vma_chain *avc; |
| 2283 | |
| 2284 | if (locked) { |
| 2285 | anon_vma = folio_anon_vma(folio); |
| 2286 | /* anon_vma disappear under us? */ |
| 2287 | VM_BUG_ON_FOLIO(!anon_vma, folio); |
| 2288 | } else { |
| 2289 | anon_vma = rmap_walk_anon_lock(folio, rwc); |
| 2290 | } |
| 2291 | if (!anon_vma) |
| 2292 | return; |
| 2293 | |
| 2294 | pgoff_start = folio_pgoff(folio); |
| 2295 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; |
| 2296 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, |
| 2297 | pgoff_start, pgoff_end) { |
| 2298 | struct vm_area_struct *vma = avc->vma; |
| 2299 | unsigned long address = vma_address(&folio->page, vma); |
| 2300 | |
| 2301 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
| 2302 | cond_resched(); |
| 2303 | |
| 2304 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
| 2305 | continue; |
| 2306 | |
| 2307 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
| 2308 | break; |
| 2309 | if (rwc->done && rwc->done(folio)) |
| 2310 | break; |
| 2311 | } |
| 2312 | |
| 2313 | if (!locked) |
| 2314 | anon_vma_unlock_read(anon_vma); |
| 2315 | } |
| 2316 | |
| 2317 | /* |
| 2318 | * rmap_walk_file - do something to a file-backed folio using the object-based rmap method
| 2319 | * @folio: the folio to be handled
| 2320 | * @rwc: control variable according to each walk type
| 2321 | * @locked: caller already holds mapping->i_mmap_rwsem
| 2322 | * Find all the mappings of a folio using the mapping pointer and the vma chains
| 2323 | * contained in the address_space struct it points to.
| 2324 | */ |
| 2325 | static void rmap_walk_file(struct folio *folio, |
| 2326 | const struct rmap_walk_control *rwc, bool locked) |
| 2327 | { |
| 2328 | struct address_space *mapping = folio_mapping(folio); |
| 2329 | pgoff_t pgoff_start, pgoff_end; |
| 2330 | struct vm_area_struct *vma; |
| 2331 | |
| 2332 | /* |
| 2333 | * The page lock not only makes sure that page->mapping cannot |
| 2334 | * suddenly be NULLified by truncation, it makes sure that the |
| 2335 | * structure at mapping cannot be freed and reused yet, |
| 2336 | * so we can safely take mapping->i_mmap_rwsem. |
| 2337 | */ |
| 2338 | VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); |
| 2339 | |
| 2340 | if (!mapping) |
| 2341 | return; |
| 2342 | |
| 2343 | pgoff_start = folio_pgoff(folio); |
| 2344 | pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; |
| 2345 | if (!locked) |
| 2346 | i_mmap_lock_read(mapping); |
| 2347 | vma_interval_tree_foreach(vma, &mapping->i_mmap, |
| 2348 | pgoff_start, pgoff_end) { |
| 2349 | unsigned long address = vma_address(&folio->page, vma); |
| 2350 | |
| 2351 | VM_BUG_ON_VMA(address == -EFAULT, vma); |
| 2352 | cond_resched(); |
| 2353 | |
| 2354 | if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) |
| 2355 | continue; |
| 2356 | |
| 2357 | if (!rwc->rmap_one(folio, vma, address, rwc->arg)) |
| 2358 | goto done; |
| 2359 | if (rwc->done && rwc->done(folio)) |
| 2360 | goto done; |
| 2361 | } |
| 2362 | |
| 2363 | done: |
| 2364 | if (!locked) |
| 2365 | i_mmap_unlock_read(mapping); |
| 2366 | } |
| 2367 | |
| 2368 | void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc) |
| 2369 | { |
| 2370 | if (unlikely(folio_test_ksm(folio))) |
| 2371 | rmap_walk_ksm(folio, rwc); |
| 2372 | else if (folio_test_anon(folio)) |
| 2373 | rmap_walk_anon(folio, rwc, false); |
| 2374 | else |
| 2375 | rmap_walk_file(folio, rwc, false); |
| 2376 | } |
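|  | 
|  | /*
|  | * Illustrative sketch (hypothetical): a typical rmap_walk() user fills
|  | * in an rmap_walk_control on the stack, pointing ->rmap_one at a per-vma
|  | * handler and ->arg at its private state; my_handler and my_state are
|  | * placeholder names.
|  | *
|  | *	struct rmap_walk_control rwc = {
|  | *		.rmap_one	= my_handler,
|  | *		.arg		= &my_state,
|  | *		.anon_lock	= folio_lock_anon_vma_read,
|  | *	};
|  | *	rmap_walk(folio, &rwc);
|  | */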
| 2377 | |
| 2378 | /* Like rmap_walk, but caller holds relevant rmap lock */ |
| 2379 | void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc) |
| 2380 | { |
| 2381 | /* no ksm support for now */ |
| 2382 | VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); |
| 2383 | if (folio_test_anon(folio)) |
| 2384 | rmap_walk_anon(folio, rwc, true); |
| 2385 | else |
| 2386 | rmap_walk_file(folio, rwc, true); |
| 2387 | } |
| 2388 | |
| 2389 | #ifdef CONFIG_HUGETLB_PAGE |
| 2390 | /* |
| 2391 | * The following two functions are for anonymous (private mapped) hugepages. |
| 2392 | * Unlike common anonymous pages, anonymous hugepages have no accounting code |
| 2393 | * and no lru code, because we handle hugepages differently from common pages. |
| 2394 | * |
| 2395 | * RMAP_COMPOUND is ignored. |
| 2396 | */ |
| 2397 | void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, |
| 2398 | unsigned long address, rmap_t flags) |
| 2399 | { |
| 2400 | struct anon_vma *anon_vma = vma->anon_vma; |
| 2401 | int first; |
| 2402 | |
| 2403 | BUG_ON(!PageLocked(page)); |
| 2404 | BUG_ON(!anon_vma); |
| 2405 | /* address might be in next vma when migration races vma_adjust */ |
| 2406 | first = atomic_inc_and_test(compound_mapcount_ptr(page)); |
| 2407 | if (first) |
| 2408 | __page_set_anon_rmap(page, vma, address, |
| 2409 | !!(flags & RMAP_EXCLUSIVE)); |
| 2410 | } |
| 2411 | |
| 2412 | void hugepage_add_new_anon_rmap(struct page *page, |
| 2413 | struct vm_area_struct *vma, unsigned long address) |
| 2414 | { |
| 2415 | BUG_ON(address < vma->vm_start || address >= vma->vm_end); |
| 2416 | atomic_set(compound_mapcount_ptr(page), 0); |
| 2417 | atomic_set(compound_pincount_ptr(page), 0); |
| 2418 | |
| 2419 | __page_set_anon_rmap(page, vma, address, 1); |
| 2420 | } |
| 2421 | #endif /* CONFIG_HUGETLB_PAGE */ |