| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | #include <linux/kernel.h> |
| 3 | #include <linux/errno.h> |
| 4 | #include <linux/err.h> |
| 5 | #include <linux/spinlock.h> |
| 6 | |
| 7 | #include <linux/mm.h> |
| 8 | #include <linux/memremap.h> |
| 9 | #include <linux/pagemap.h> |
| 10 | #include <linux/rmap.h> |
| 11 | #include <linux/swap.h> |
| 12 | #include <linux/swapops.h> |
| 13 | #include <linux/secretmem.h> |
| 14 | |
| 15 | #include <linux/sched/signal.h> |
| 16 | #include <linux/rwsem.h> |
| 17 | #include <linux/hugetlb.h> |
| 18 | #include <linux/migrate.h> |
| 19 | #include <linux/mm_inline.h> |
| 20 | #include <linux/sched/mm.h> |
| 21 | |
| 22 | #include <asm/mmu_context.h> |
| 23 | #include <asm/tlbflush.h> |
| 24 | |
| 25 | #include "internal.h" |
| 26 | |
| 27 | struct follow_page_context { |
| 28 | struct dev_pagemap *pgmap; |
| 29 | unsigned int page_mask; |
| 30 | }; |
| 31 | |
| 32 | static inline void sanity_check_pinned_pages(struct page **pages, |
| 33 | unsigned long npages) |
| 34 | { |
| 35 | if (!IS_ENABLED(CONFIG_DEBUG_VM)) |
| 36 | return; |
| 37 | |
| 38 | /* |
| 39 | * We only pin anonymous pages if they are exclusive. Once pinned, we
| 40 | * can no longer turn them into possibly-shared pages, and PageAnonExclusive()
| 41 | * will stick around until the page is freed.
| 42 | * |
| 43 | * We'd like to verify that our pinned anonymous pages are still mapped |
| 44 | * exclusively. The issue with anon THP is that we don't know how |
| 45 | * they are/were mapped when pinning them. However, for anon |
| 46 | * THP we can assume that either the given page (PTE-mapped THP) or |
| 47 | * the head page (PMD-mapped THP) should be PageAnonExclusive(). If |
| 48 | * neither is the case, there is certainly something wrong. |
| 49 | */ |
| 50 | for (; npages; npages--, pages++) { |
| 51 | struct page *page = *pages; |
| 52 | struct folio *folio = page_folio(page); |
| 53 | |
| 54 | if (!folio_test_anon(folio)) |
| 55 | continue; |
| 56 | if (!folio_test_large(folio) || folio_test_hugetlb(folio)) |
| 57 | VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); |
| 58 | else |
| 59 | /* Either a PTE-mapped or a PMD-mapped THP. */ |
| 60 | VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && |
| 61 | !PageAnonExclusive(page), page); |
| 62 | } |
| 63 | } |
| 64 | |
| 65 | /* |
| 66 | * Return the folio with ref appropriately incremented, |
| 67 | * or NULL if that failed. |
| 68 | */ |
| 69 | static inline struct folio *try_get_folio(struct page *page, int refs) |
| 70 | { |
| 71 | struct folio *folio; |
| 72 | |
| 73 | retry: |
| 74 | folio = page_folio(page); |
| 75 | if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) |
| 76 | return NULL; |
| 77 | if (unlikely(!folio_ref_try_add_rcu(folio, refs))) |
| 78 | return NULL; |
| 79 | |
| 80 | /* |
| 81 | * At this point we have a stable reference to the folio; but it |
| 82 | * could be that between calling page_folio() and the refcount |
| 83 | * increment, the folio was split, in which case we'd end up |
| 84 | * holding a reference on a folio that has nothing to do with the page |
| 85 | * we were given anymore. |
| 86 | * So now that the folio is stable, recheck that the page still |
| 87 | * belongs to this folio. |
| 88 | */ |
| 89 | if (unlikely(page_folio(page) != folio)) { |
| 90 | if (!put_devmap_managed_page_refs(&folio->page, refs)) |
| 91 | folio_put_refs(folio, refs); |
| 92 | goto retry; |
| 93 | } |
| 94 | |
| 95 | return folio; |
| 96 | } |
| 97 | |
| 98 | /** |
| 99 | * try_grab_folio() - Attempt to get or pin a folio. |
| 100 | * @page: pointer to page to be grabbed |
| 101 | * @refs: the value to (effectively) add to the folio's refcount |
| 102 | * @flags: gup flags: these are the FOLL_* flag values. |
| 103 | * |
| 104 | * "grab" names in this file mean, "look at flags to decide whether to use |
| 105 | * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount."
| 106 | * |
| 107 | * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the |
| 108 | * same time. (That's true throughout the get_user_pages*() and |
| 109 | * pin_user_pages*() APIs.) Cases: |
| 110 | * |
| 111 | * FOLL_GET: folio's refcount will be incremented by @refs. |
| 112 | * |
| 113 | * FOLL_PIN on large folios: folio's refcount will be incremented by |
| 114 | * @refs, and its compound_pincount will be incremented by @refs. |
| 115 | * |
| 116 | * FOLL_PIN on single-page folios: folio's refcount will be incremented by |
| 117 | * @refs * GUP_PIN_COUNTING_BIAS. |
| 118 | * |
| 119 | * Return: The folio containing @page (with refcount appropriately |
| 120 | * incremented) for success, or NULL upon failure. If neither FOLL_GET |
| 121 | * nor FOLL_PIN was set, that's considered failure, and furthermore, |
| 122 | * a likely bug in the caller, so a warning is also emitted. |
| 123 | */ |
| 124 | struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) |
| 125 | { |
| 126 | if (flags & FOLL_GET) |
| 127 | return try_get_folio(page, refs); |
| 128 | else if (flags & FOLL_PIN) { |
| 129 | struct folio *folio; |
| 130 | |
| 131 | /* |
| 132 | * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if the page is not
| 133 | * in the right zone, so fail and let the caller fall back to the slow
| 134 | * path. |
| 135 | */ |
| 136 | if (unlikely((flags & FOLL_LONGTERM) && |
| 137 | !is_longterm_pinnable_page(page))) |
| 138 | return NULL; |
| 139 | |
| 140 | /* |
| 141 | * CAUTION: Don't use compound_head() on the page before this |
| 142 | * point, the result won't be stable. |
| 143 | */ |
| 144 | folio = try_get_folio(page, refs); |
| 145 | if (!folio) |
| 146 | return NULL; |
| 147 | |
| 148 | /* |
| 149 | * When pinning a large folio, use an exact count to track it. |
| 150 | * |
| 151 | * However, be sure to *also* increment the normal folio |
| 152 | * refcount field at least once, so that the folio really |
| 153 | * is pinned. That's why the refcount from the earlier |
| 154 | * try_get_folio() is left intact. |
| 155 | */ |
| 156 | if (folio_test_large(folio)) |
| 157 | atomic_add(refs, folio_pincount_ptr(folio)); |
| 158 | else |
| 159 | folio_ref_add(folio, |
| 160 | refs * (GUP_PIN_COUNTING_BIAS - 1)); |
| 161 | /* |
| 162 | * Adjust the pincount before re-checking the PTE for changes. |
| 163 | * This is essentially a smp_mb() and is paired with a memory |
| 164 | * barrier in page_try_share_anon_rmap(). |
| 165 | */ |
| 166 | smp_mb__after_atomic(); |
| 167 | |
| 168 | node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); |
| 169 | |
| 170 | return folio; |
| 171 | } |
| 172 | |
| 173 | WARN_ON_ONCE(1); |
| 174 | return NULL; |
| 175 | } |
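| | 
| | /*
| | * Illustrative note (not from the original source): pinning one order-0
| | * page with refs == 1 leaves the folio refcount elevated by
| | * GUP_PIN_COUNTING_BIAS (1 from try_get_folio() plus BIAS - 1 added above),
| | * which is what lets page_maybe_dma_pinned() treat a large refcount as
| | * "probably pinned". A large folio instead gets refcount += 1 and an exact
| | * pincount += 1.
| | */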
| 176 | |
| 177 | static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) |
| 178 | { |
| 179 | if (flags & FOLL_PIN) { |
| 180 | node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); |
| 181 | if (folio_test_large(folio)) |
| 182 | atomic_sub(refs, folio_pincount_ptr(folio)); |
| 183 | else |
| 184 | refs *= GUP_PIN_COUNTING_BIAS; |
| 185 | } |
| 186 | |
| 187 | if (!put_devmap_managed_page_refs(&folio->page, refs)) |
| 188 | folio_put_refs(folio, refs); |
| 189 | } |
| 190 | |
| 191 | /** |
| 192 | * try_grab_page() - elevate a page's refcount by a flag-dependent amount |
| 193 | * @page: pointer to page to be grabbed |
| 194 | * @flags: gup flags: these are the FOLL_* flag values. |
| 195 | * |
| 196 | * This might not do anything at all, depending on the flags argument. |
| 197 | * |
| 198 | * "grab" names in this file mean, "look at flags to decide whether to use |
| 199 | * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount. |
| 200 | * |
| 201 | * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same |
| 202 | * time. Cases: please see the try_grab_folio() documentation, with |
| 203 | * "refs=1". |
| 204 | * |
| 205 | * Return: true for success, or if no action was required (if neither FOLL_PIN |
| 206 | * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or |
| 207 | * FOLL_PIN was set, but the page could not be grabbed. |
| 208 | */ |
| 209 | bool __must_check try_grab_page(struct page *page, unsigned int flags) |
| 210 | { |
| 211 | struct folio *folio = page_folio(page); |
| 212 | |
| 213 | WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN)); |
| 214 | if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) |
| 215 | return false; |
| 216 | |
| 217 | if (flags & FOLL_GET) |
| 218 | folio_ref_inc(folio); |
| 219 | else if (flags & FOLL_PIN) { |
| 220 | /* |
| 221 | * Similar to try_grab_folio(): be sure to *also* |
| 222 | * increment the normal page refcount field at least once, |
| 223 | * so that the page really is pinned. |
| 224 | */ |
| 225 | if (folio_test_large(folio)) { |
| 226 | folio_ref_add(folio, 1); |
| 227 | atomic_add(1, folio_pincount_ptr(folio)); |
| 228 | } else { |
| 229 | folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); |
| 230 | } |
| 231 | |
| 232 | node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1); |
| 233 | } |
| 234 | |
| 235 | return true; |
| 236 | } |
| 237 | |
| 238 | /** |
| 239 | * unpin_user_page() - release a dma-pinned page |
| 240 | * @page: pointer to page to be released |
| 241 | * |
| 242 | * Pages that were pinned via pin_user_pages*() must be released via either |
| 243 | * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so |
| 244 | * that such pages can be separately tracked and uniquely handled. In |
| 245 | * particular, interactions with RDMA and filesystems need special handling. |
| 246 | */ |
| 247 | void unpin_user_page(struct page *page) |
| 248 | { |
| 249 | sanity_check_pinned_pages(&page, 1); |
| 250 | gup_put_folio(page_folio(page), 1, FOLL_PIN); |
| 251 | } |
| 252 | EXPORT_SYMBOL(unpin_user_page); |
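| | 
| | /*
| | * Sketch of the expected caller pairing (assumed example, not taken from
| | * this file):
| | *
| | *	nr = pin_user_pages_fast(addr, nr_pages, FOLL_WRITE, pages);
| | *	... DMA into the pages ...
| | *	for (i = 0; i < nr; i++)
| | *		unpin_user_page(pages[i]);
| | *
| | * Pages obtained via pin_user_pages*() must never be released with plain
| | * put_page().
| | */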
| 253 | |
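| | /*
| | * Return the folio backing the i-th page of the physically contiguous
| | * range that starts at @start, and report via @ntails how many of the
| | * remaining pages fall into that same folio, so the caller can release
| | * them with a single refcount update.
| | */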
| 254 | static inline struct folio *gup_folio_range_next(struct page *start, |
| 255 | unsigned long npages, unsigned long i, unsigned int *ntails) |
| 256 | { |
| 257 | struct page *next = nth_page(start, i); |
| 258 | struct folio *folio = page_folio(next); |
| 259 | unsigned int nr = 1; |
| 260 | |
| 261 | if (folio_test_large(folio)) |
| 262 | nr = min_t(unsigned int, npages - i, |
| 263 | folio_nr_pages(folio) - folio_page_idx(folio, next)); |
| 264 | |
| 265 | *ntails = nr; |
| 266 | return folio; |
| 267 | } |
| 268 | |
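| | /*
| | * Like gup_folio_range_next(), but for an arbitrary page array: return the
| | * folio of list[i] and, via @ntails, the length of the run of consecutive
| | * array entries that belong to that same folio.
| | */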
| 269 | static inline struct folio *gup_folio_next(struct page **list, |
| 270 | unsigned long npages, unsigned long i, unsigned int *ntails) |
| 271 | { |
| 272 | struct folio *folio = page_folio(list[i]); |
| 273 | unsigned int nr; |
| 274 | |
| 275 | for (nr = i + 1; nr < npages; nr++) { |
| 276 | if (page_folio(list[nr]) != folio) |
| 277 | break; |
| 278 | } |
| 279 | |
| 280 | *ntails = nr - i; |
| 281 | return folio; |
| 282 | } |
| 283 | |
| 284 | /** |
| 285 | * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages |
| 286 | * @pages: array of pages to be maybe marked dirty, and definitely released. |
| 287 | * @npages: number of pages in the @pages array. |
| 288 | * @make_dirty: whether to mark the pages dirty |
| 289 | * |
| 290 | * "gup-pinned page" refers to a page that has had one of the get_user_pages() |
| 291 | * variants called on that page. |
| 292 | * |
| 293 | * For each page in the @pages array, make that page (or its head page, if a |
| 294 | * compound page) dirty, if @make_dirty is true, and if the page was previously |
| 295 | * listed as clean. In any case, releases all pages using unpin_user_page(), |
| 296 | * possibly via unpin_user_pages(), for the non-dirty case. |
| 297 | * |
| 298 | * Please see the unpin_user_page() documentation for details. |
| 299 | * |
| 300 | * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is |
| 301 | * required, then the caller should a) verify that this is really correct, |
| 302 | * because _lock() is usually required, and b) hand code it: |
| 303 | * set_page_dirty(), unpin_user_page().
| 304 | * |
| 305 | */ |
| 306 | void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, |
| 307 | bool make_dirty) |
| 308 | { |
| 309 | unsigned long i; |
| 310 | struct folio *folio; |
| 311 | unsigned int nr; |
| 312 | |
| 313 | if (!make_dirty) { |
| 314 | unpin_user_pages(pages, npages); |
| 315 | return; |
| 316 | } |
| 317 | |
| 318 | sanity_check_pinned_pages(pages, npages); |
| 319 | for (i = 0; i < npages; i += nr) { |
| 320 | folio = gup_folio_next(pages, npages, i, &nr); |
| 321 | /* |
| 322 | * Checking PageDirty at this point may race with |
| 323 | * clear_page_dirty_for_io(), but that's OK. Two key |
| 324 | * cases: |
| 325 | * |
| 326 | * 1) This code sees the page as already dirty, so it |
| 327 | * skips the call to set_page_dirty(). That could happen |
| 328 | * because clear_page_dirty_for_io() called |
| 329 | * page_mkclean(), followed by set_page_dirty(). |
| 330 | * However, now the page is going to get written back, |
| 331 | * which meets the original intention of setting it |
| 332 | * dirty, so all is well: clear_page_dirty_for_io() goes |
| 333 | * on to call TestClearPageDirty(), and write the page |
| 334 | * back. |
| 335 | * |
| 336 | * 2) This code sees the page as clean, so it calls |
| 337 | * set_page_dirty(). The page stays dirty, despite being |
| 338 | * written back, so it gets written back again in the |
| 339 | * next writeback cycle. This is harmless. |
| 340 | */ |
| 341 | if (!folio_test_dirty(folio)) { |
| 342 | folio_lock(folio); |
| 343 | folio_mark_dirty(folio); |
| 344 | folio_unlock(folio); |
| 345 | } |
| 346 | gup_put_folio(folio, nr, FOLL_PIN); |
| 347 | } |
| 348 | } |
| 349 | EXPORT_SYMBOL(unpin_user_pages_dirty_lock); |
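| | 
| | /*
| | * Typical use (assumed example, not taken from this file): a driver whose
| | * device has DMA'd data into pinned user pages would release them with
| | *
| | *	unpin_user_pages_dirty_lock(pages, npages, true);
| | *
| | * so the new contents are treated as dirty, while a transfer that only read
| | * from the pages can pass false.
| | */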
| 350 | |
| 351 | /** |
| 352 | * unpin_user_page_range_dirty_lock() - release and optionally dirty |
| 353 | * gup-pinned page range |
| 354 | * |
| 355 | * @page: the starting page of a range that may be marked dirty, and is definitely released.
| 356 | * @npages: number of consecutive pages to release. |
| 357 | * @make_dirty: whether to mark the pages dirty |
| 358 | * |
| 359 | * "gup-pinned page range" refers to a range of pages that has had one of the |
| 360 | * pin_user_pages() variants called on that page. |
| 361 | * |
| 362 | * For the range of @npages pages starting at @page, make each page (or its
| 363 | * head page, if a compound page) dirty, if @make_dirty is true, and if the
| 364 | * page was previously listed as clean.
| 365 | * |
| 366 | * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is |
| 367 | * required, then the caller should a) verify that this is really correct, |
| 368 | * because _lock() is usually required, and b) hand code it: |
| 369 | * set_page_dirty_lock(), unpin_user_page(). |
| 370 | * |
| 371 | */ |
| 372 | void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, |
| 373 | bool make_dirty) |
| 374 | { |
| 375 | unsigned long i; |
| 376 | struct folio *folio; |
| 377 | unsigned int nr; |
| 378 | |
| 379 | for (i = 0; i < npages; i += nr) { |
| 380 | folio = gup_folio_range_next(page, npages, i, &nr); |
| 381 | if (make_dirty && !folio_test_dirty(folio)) { |
| 382 | folio_lock(folio); |
| 383 | folio_mark_dirty(folio); |
| 384 | folio_unlock(folio); |
| 385 | } |
| 386 | gup_put_folio(folio, nr, FOLL_PIN); |
| 387 | } |
| 388 | } |
| 389 | EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); |
| 390 | |
| 391 | static void unpin_user_pages_lockless(struct page **pages, unsigned long npages) |
| 392 | { |
| 393 | unsigned long i; |
| 394 | struct folio *folio; |
| 395 | unsigned int nr; |
| 396 | |
| 397 | /* |
| 398 | * Don't perform any sanity checks because we might have raced with |
| 399 | * fork() and some anonymous pages might now actually be shared -- |
| 400 | * which is why we're unpinning after all. |
| 401 | */ |
| 402 | for (i = 0; i < npages; i += nr) { |
| 403 | folio = gup_folio_next(pages, npages, i, &nr); |
| 404 | gup_put_folio(folio, nr, FOLL_PIN); |
| 405 | } |
| 406 | } |
| 407 | |
| 408 | /** |
| 409 | * unpin_user_pages() - release an array of gup-pinned pages. |
| 410 | * @pages: array of pages to be released.
| 411 | * @npages: number of pages in the @pages array. |
| 412 | * |
| 413 | * For each page in the @pages array, release the page using unpin_user_page(). |
| 414 | * |
| 415 | * Please see the unpin_user_page() documentation for details. |
| 416 | */ |
| 417 | void unpin_user_pages(struct page **pages, unsigned long npages) |
| 418 | { |
| 419 | unsigned long i; |
| 420 | struct folio *folio; |
| 421 | unsigned int nr; |
| 422 | |
| 423 | /* |
| 424 | * If this WARN_ON() fires, then the system *might* be leaking pages (by |
| 425 | * leaving them pinned), but probably not. More likely, gup/pup returned |
| 426 | * a hard -ERRNO error to the caller, who erroneously passed it here. |
| 427 | */ |
| 428 | if (WARN_ON(IS_ERR_VALUE(npages))) |
| 429 | return; |
| 430 | |
| 431 | sanity_check_pinned_pages(pages, npages); |
| 432 | for (i = 0; i < npages; i += nr) { |
| 433 | folio = gup_folio_next(pages, npages, i, &nr); |
| 434 | gup_put_folio(folio, nr, FOLL_PIN); |
| 435 | } |
| 436 | } |
| 437 | EXPORT_SYMBOL(unpin_user_pages); |
| 438 | |
| 439 | /* |
| 440 | * Set MMF_HAS_PINNED if not set yet; once set, it stays for the mm's
| 441 | * lifetime. Avoid setting the bit unless necessary, or it might cause write
| 442 | * cache bouncing on large SMP machines for concurrent pinned gups. |
| 443 | */ |
| 444 | static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) |
| 445 | { |
| 446 | if (!test_bit(MMF_HAS_PINNED, mm_flags)) |
| 447 | set_bit(MMF_HAS_PINNED, mm_flags); |
| 448 | } |
| 449 | |
| 450 | #ifdef CONFIG_MMU |
| 451 | static struct page *no_page_table(struct vm_area_struct *vma, |
| 452 | unsigned int flags) |
| 453 | { |
| 454 | /* |
| 455 | * When core dumping an enormous anonymous area that nobody |
| 456 | * has touched so far, we don't want to allocate unnecessary pages or |
| 457 | * page tables. Return error instead of NULL to skip handle_mm_fault, |
| 458 | * then get_dump_page() will return NULL to leave a hole in the dump. |
| 459 | * But we can only make this optimization where a hole would surely |
| 460 | * be zero-filled if handle_mm_fault() actually did handle it. |
| 461 | */ |
| 462 | if ((flags & FOLL_DUMP) && |
| 463 | (vma_is_anonymous(vma) || !vma->vm_ops->fault)) |
| 464 | return ERR_PTR(-EFAULT); |
| 465 | return NULL; |
| 466 | } |
| 467 | |
| 468 | static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, |
| 469 | pte_t *pte, unsigned int flags) |
| 470 | { |
| 471 | if (flags & FOLL_TOUCH) { |
| 472 | pte_t entry = *pte; |
| 473 | |
| 474 | if (flags & FOLL_WRITE) |
| 475 | entry = pte_mkdirty(entry); |
| 476 | entry = pte_mkyoung(entry); |
| 477 | |
| 478 | if (!pte_same(*pte, entry)) { |
| 479 | set_pte_at(vma->vm_mm, address, pte, entry); |
| 480 | update_mmu_cache(vma, address, pte); |
| 481 | } |
| 482 | } |
| 483 | |
| 484 | /* Proper page table entry exists, but no corresponding struct page */ |
| 485 | return -EEXIST; |
| 486 | } |
| 487 | |
| 488 | /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */ |
| 489 | static inline bool can_follow_write_pte(pte_t pte, struct page *page, |
| 490 | struct vm_area_struct *vma, |
| 491 | unsigned int flags) |
| 492 | { |
| 493 | /* If the pte is writable, we can write to the page. */ |
| 494 | if (pte_write(pte)) |
| 495 | return true; |
| 496 | |
| 497 | /* Maybe FOLL_FORCE is set to override it? */ |
| 498 | if (!(flags & FOLL_FORCE)) |
| 499 | return false; |
| 500 | |
| 501 | /* But FOLL_FORCE has no effect on shared mappings */ |
| 502 | if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) |
| 503 | return false; |
| 504 | |
| 505 | /* ... or read-only private ones */ |
| 506 | if (!(vma->vm_flags & VM_MAYWRITE)) |
| 507 | return false; |
| 508 | |
| 509 | /* ... or already writable ones that just need to take a write fault */ |
| 510 | if (vma->vm_flags & VM_WRITE) |
| 511 | return false; |
| 512 | |
| 513 | /* |
| 514 | * See can_change_pte_writable(): we broke COW and could map the page |
| 515 | * writable if we have an exclusive anonymous page ... |
| 516 | */ |
| 517 | if (!page || !PageAnon(page) || !PageAnonExclusive(page)) |
| 518 | return false; |
| 519 | |
| 520 | /* ... and a write-fault isn't required for other reasons. */ |
| 521 | if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte)) |
| 522 | return false; |
| 523 | return !userfaultfd_pte_wp(vma, pte); |
| 524 | } |
| 525 | |
| 526 | static struct page *follow_page_pte(struct vm_area_struct *vma, |
| 527 | unsigned long address, pmd_t *pmd, unsigned int flags, |
| 528 | struct dev_pagemap **pgmap) |
| 529 | { |
| 530 | struct mm_struct *mm = vma->vm_mm; |
| 531 | struct page *page; |
| 532 | spinlock_t *ptl; |
| 533 | pte_t *ptep, pte; |
| 534 | int ret; |
| 535 | |
| 536 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
| 537 | if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == |
| 538 | (FOLL_PIN | FOLL_GET))) |
| 539 | return ERR_PTR(-EINVAL); |
| 540 | |
| 541 | /* |
| 542 | * Consider PTE-level hugetlb, such as the contiguous-PTE hugetlb
| 543 | * pages on the arm64 architecture.
| 544 | */ |
| 545 | if (is_vm_hugetlb_page(vma)) { |
| 546 | page = follow_huge_pmd_pte(vma, address, flags); |
| 547 | if (page) |
| 548 | return page; |
| 549 | return no_page_table(vma, flags); |
| 550 | } |
| 551 | |
| 552 | retry: |
| 553 | if (unlikely(pmd_bad(*pmd))) |
| 554 | return no_page_table(vma, flags); |
| 555 | |
| 556 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 557 | pte = *ptep; |
| 558 | if (!pte_present(pte)) { |
| 559 | swp_entry_t entry; |
| 560 | /* |
| 561 | * KSM's break_ksm() relies upon recognizing a ksm page |
| 562 | * even while it is being migrated, so for that case we |
| 563 | * need migration_entry_wait(). |
| 564 | */ |
| 565 | if (likely(!(flags & FOLL_MIGRATION))) |
| 566 | goto no_page; |
| 567 | if (pte_none(pte)) |
| 568 | goto no_page; |
| 569 | entry = pte_to_swp_entry(pte); |
| 570 | if (!is_migration_entry(entry)) |
| 571 | goto no_page; |
| 572 | pte_unmap_unlock(ptep, ptl); |
| 573 | migration_entry_wait(mm, pmd, address); |
| 574 | goto retry; |
| 575 | } |
| 576 | if (pte_protnone(pte) && !gup_can_follow_protnone(flags)) |
| 577 | goto no_page; |
| 578 | |
| 579 | page = vm_normal_page(vma, address, pte); |
| 580 | |
| 581 | /* |
| 582 | * We only care about anon pages in can_follow_write_pte() and don't |
| 583 | * have to worry about pte_devmap() because they are never anon. |
| 584 | */ |
| 585 | if ((flags & FOLL_WRITE) && |
| 586 | !can_follow_write_pte(pte, page, vma, flags)) { |
| 587 | page = NULL; |
| 588 | goto out; |
| 589 | } |
| 590 | |
| 591 | if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { |
| 592 | /* |
| 593 | * Only return device mapping pages in the FOLL_GET or FOLL_PIN |
| 594 | * case since they are only valid while holding the pgmap |
| 595 | * reference. |
| 596 | */ |
| 597 | *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); |
| 598 | if (*pgmap) |
| 599 | page = pte_page(pte); |
| 600 | else |
| 601 | goto no_page; |
| 602 | } else if (unlikely(!page)) { |
| 603 | if (flags & FOLL_DUMP) { |
| 604 | /* Avoid special (like zero) pages in core dumps */ |
| 605 | page = ERR_PTR(-EFAULT); |
| 606 | goto out; |
| 607 | } |
| 608 | |
| 609 | if (is_zero_pfn(pte_pfn(pte))) { |
| 610 | page = pte_page(pte); |
| 611 | } else { |
| 612 | ret = follow_pfn_pte(vma, address, ptep, flags); |
| 613 | page = ERR_PTR(ret); |
| 614 | goto out; |
| 615 | } |
| 616 | } |
| 617 | |
| 618 | if (!pte_write(pte) && gup_must_unshare(flags, page)) { |
| 619 | page = ERR_PTR(-EMLINK); |
| 620 | goto out; |
| 621 | } |
| 622 | |
| 623 | VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && |
| 624 | !PageAnonExclusive(page), page); |
| 625 | |
| 626 | /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */ |
| 627 | if (unlikely(!try_grab_page(page, flags))) { |
| 628 | page = ERR_PTR(-ENOMEM); |
| 629 | goto out; |
| 630 | } |
| 631 | /* |
| 632 | * We need to make the page accessible if and only if we are going |
| 633 | * to access its content (the FOLL_PIN case). Please see |
| 634 | * Documentation/core-api/pin_user_pages.rst for details. |
| 635 | */ |
| 636 | if (flags & FOLL_PIN) { |
| 637 | ret = arch_make_page_accessible(page); |
| 638 | if (ret) { |
| 639 | unpin_user_page(page); |
| 640 | page = ERR_PTR(ret); |
| 641 | goto out; |
| 642 | } |
| 643 | } |
| 644 | if (flags & FOLL_TOUCH) { |
| 645 | if ((flags & FOLL_WRITE) && |
| 646 | !pte_dirty(pte) && !PageDirty(page)) |
| 647 | set_page_dirty(page); |
| 648 | /* |
| 649 | * pte_mkyoung() would be more correct here, but atomic care |
| 650 | * is needed to avoid losing the dirty bit: it is easier to use |
| 651 | * mark_page_accessed(). |
| 652 | */ |
| 653 | mark_page_accessed(page); |
| 654 | } |
| 655 | out: |
| 656 | pte_unmap_unlock(ptep, ptl); |
| 657 | return page; |
| 658 | no_page: |
| 659 | pte_unmap_unlock(ptep, ptl); |
| 660 | if (!pte_none(pte)) |
| 661 | return NULL; |
| 662 | return no_page_table(vma, flags); |
| 663 | } |
| 664 | |
| 665 | static struct page *follow_pmd_mask(struct vm_area_struct *vma, |
| 666 | unsigned long address, pud_t *pudp, |
| 667 | unsigned int flags, |
| 668 | struct follow_page_context *ctx) |
| 669 | { |
| 670 | pmd_t *pmd, pmdval; |
| 671 | spinlock_t *ptl; |
| 672 | struct page *page; |
| 673 | struct mm_struct *mm = vma->vm_mm; |
| 674 | |
| 675 | pmd = pmd_offset(pudp, address); |
| 676 | /* |
| 677 | * The READ_ONCE() will stabilize the pmdval in a register or |
| 678 | * on the stack so that it will stop changing under the code. |
| 679 | */ |
| 680 | pmdval = READ_ONCE(*pmd); |
| 681 | if (pmd_none(pmdval)) |
| 682 | return no_page_table(vma, flags); |
| 683 | if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) { |
| 684 | page = follow_huge_pmd_pte(vma, address, flags); |
| 685 | if (page) |
| 686 | return page; |
| 687 | return no_page_table(vma, flags); |
| 688 | } |
| 689 | if (is_hugepd(__hugepd(pmd_val(pmdval)))) { |
| 690 | page = follow_huge_pd(vma, address, |
| 691 | __hugepd(pmd_val(pmdval)), flags, |
| 692 | PMD_SHIFT); |
| 693 | if (page) |
| 694 | return page; |
| 695 | return no_page_table(vma, flags); |
| 696 | } |
| 697 | retry: |
| 698 | if (!pmd_present(pmdval)) { |
| 699 | /* |
| 700 | * Should never reach here, if thp migration is not supported; |
| 701 | * Otherwise, it must be a thp migration entry. |
| 702 | */ |
| 703 | VM_BUG_ON(!thp_migration_supported() || |
| 704 | !is_pmd_migration_entry(pmdval)); |
| 705 | |
| 706 | if (likely(!(flags & FOLL_MIGRATION))) |
| 707 | return no_page_table(vma, flags); |
| 708 | |
| 709 | pmd_migration_entry_wait(mm, pmd); |
| 710 | pmdval = READ_ONCE(*pmd); |
| 711 | /* |
| 712 | * MADV_DONTNEED may convert the pmd to null because |
| 713 | * mmap_lock is held in read mode |
| 714 | */ |
| 715 | if (pmd_none(pmdval)) |
| 716 | return no_page_table(vma, flags); |
| 717 | goto retry; |
| 718 | } |
| 719 | if (pmd_devmap(pmdval)) { |
| 720 | ptl = pmd_lock(mm, pmd); |
| 721 | page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); |
| 722 | spin_unlock(ptl); |
| 723 | if (page) |
| 724 | return page; |
| 725 | } |
| 726 | if (likely(!pmd_trans_huge(pmdval))) |
| 727 | return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
| 728 | |
| 729 | if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags)) |
| 730 | return no_page_table(vma, flags); |
| 731 | |
| 732 | retry_locked: |
| 733 | ptl = pmd_lock(mm, pmd); |
| 734 | if (unlikely(pmd_none(*pmd))) { |
| 735 | spin_unlock(ptl); |
| 736 | return no_page_table(vma, flags); |
| 737 | } |
| 738 | if (unlikely(!pmd_present(*pmd))) { |
| 739 | spin_unlock(ptl); |
| 740 | if (likely(!(flags & FOLL_MIGRATION))) |
| 741 | return no_page_table(vma, flags); |
| 742 | pmd_migration_entry_wait(mm, pmd); |
| 743 | goto retry_locked; |
| 744 | } |
| 745 | if (unlikely(!pmd_trans_huge(*pmd))) { |
| 746 | spin_unlock(ptl); |
| 747 | return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
| 748 | } |
| 749 | if (flags & FOLL_SPLIT_PMD) { |
| 750 | int ret; |
| 751 | page = pmd_page(*pmd); |
| 752 | if (is_huge_zero_page(page)) { |
| 753 | spin_unlock(ptl); |
| 754 | ret = 0; |
| 755 | split_huge_pmd(vma, pmd, address); |
| 756 | if (pmd_trans_unstable(pmd)) |
| 757 | ret = -EBUSY; |
| 758 | } else { |
| 759 | spin_unlock(ptl); |
| 760 | split_huge_pmd(vma, pmd, address); |
| 761 | ret = pte_alloc(mm, pmd) ? -ENOMEM : 0; |
| 762 | } |
| 763 | |
| 764 | return ret ? ERR_PTR(ret) : |
| 765 | follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
| 766 | } |
| 767 | page = follow_trans_huge_pmd(vma, address, pmd, flags); |
| 768 | spin_unlock(ptl); |
| 769 | ctx->page_mask = HPAGE_PMD_NR - 1; |
| 770 | return page; |
| 771 | } |
| 772 | |
| 773 | static struct page *follow_pud_mask(struct vm_area_struct *vma, |
| 774 | unsigned long address, p4d_t *p4dp, |
| 775 | unsigned int flags, |
| 776 | struct follow_page_context *ctx) |
| 777 | { |
| 778 | pud_t *pud; |
| 779 | spinlock_t *ptl; |
| 780 | struct page *page; |
| 781 | struct mm_struct *mm = vma->vm_mm; |
| 782 | |
| 783 | pud = pud_offset(p4dp, address); |
| 784 | if (pud_none(*pud)) |
| 785 | return no_page_table(vma, flags); |
| 786 | if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) { |
| 787 | page = follow_huge_pud(mm, address, pud, flags); |
| 788 | if (page) |
| 789 | return page; |
| 790 | return no_page_table(vma, flags); |
| 791 | } |
| 792 | if (is_hugepd(__hugepd(pud_val(*pud)))) { |
| 793 | page = follow_huge_pd(vma, address, |
| 794 | __hugepd(pud_val(*pud)), flags, |
| 795 | PUD_SHIFT); |
| 796 | if (page) |
| 797 | return page; |
| 798 | return no_page_table(vma, flags); |
| 799 | } |
| 800 | if (pud_devmap(*pud)) { |
| 801 | ptl = pud_lock(mm, pud); |
| 802 | page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); |
| 803 | spin_unlock(ptl); |
| 804 | if (page) |
| 805 | return page; |
| 806 | } |
| 807 | if (unlikely(pud_bad(*pud))) |
| 808 | return no_page_table(vma, flags); |
| 809 | |
| 810 | return follow_pmd_mask(vma, address, pud, flags, ctx); |
| 811 | } |
| 812 | |
| 813 | static struct page *follow_p4d_mask(struct vm_area_struct *vma, |
| 814 | unsigned long address, pgd_t *pgdp, |
| 815 | unsigned int flags, |
| 816 | struct follow_page_context *ctx) |
| 817 | { |
| 818 | p4d_t *p4d; |
| 819 | struct page *page; |
| 820 | |
| 821 | p4d = p4d_offset(pgdp, address); |
| 822 | if (p4d_none(*p4d)) |
| 823 | return no_page_table(vma, flags); |
| 824 | BUILD_BUG_ON(p4d_huge(*p4d)); |
| 825 | if (unlikely(p4d_bad(*p4d))) |
| 826 | return no_page_table(vma, flags); |
| 827 | |
| 828 | if (is_hugepd(__hugepd(p4d_val(*p4d)))) { |
| 829 | page = follow_huge_pd(vma, address, |
| 830 | __hugepd(p4d_val(*p4d)), flags, |
| 831 | P4D_SHIFT); |
| 832 | if (page) |
| 833 | return page; |
| 834 | return no_page_table(vma, flags); |
| 835 | } |
| 836 | return follow_pud_mask(vma, address, p4d, flags, ctx); |
| 837 | } |
| 838 | |
| 839 | /** |
| 840 | * follow_page_mask - look up a page descriptor from a user-virtual address |
| 841 | * @vma: vm_area_struct mapping @address |
| 842 | * @address: virtual address to look up |
| 843 | * @flags: flags modifying lookup behaviour |
| 844 | * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a |
| 845 | * pointer to output page_mask |
| 846 | * |
| 847 | * @flags can have FOLL_ flags set, defined in <linux/mm.h> |
| 848 | * |
| 849 | * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches |
| 850 | * the device's dev_pagemap metadata to avoid repeating expensive lookups. |
| 851 | * |
| 852 | * When getting an anonymous page and the caller has to trigger unsharing |
| 853 | * of a shared anonymous page first, -EMLINK is returned. The caller should |
| 854 | * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only |
| 855 | * relevant with FOLL_PIN and !FOLL_WRITE. |
| 856 | * |
| 857 | * On output, the @ctx->page_mask is set according to the size of the page. |
| 858 | * |
| 859 | * Return: the mapped (struct page *), %NULL if no mapping exists, or |
| 860 | * an error pointer if there is a mapping to something not represented |
| 861 | * by a page descriptor (see also vm_normal_page()). |
| 862 | */ |
| 863 | static struct page *follow_page_mask(struct vm_area_struct *vma, |
| 864 | unsigned long address, unsigned int flags, |
| 865 | struct follow_page_context *ctx) |
| 866 | { |
| 867 | pgd_t *pgd; |
| 868 | struct page *page; |
| 869 | struct mm_struct *mm = vma->vm_mm; |
| 870 | |
| 871 | ctx->page_mask = 0; |
| 872 | |
| 873 | /* make this handle hugepd */ |
| 874 | page = follow_huge_addr(mm, address, flags & FOLL_WRITE); |
| 875 | if (!IS_ERR(page)) { |
| 876 | WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN)); |
| 877 | return page; |
| 878 | } |
| 879 | |
| 880 | pgd = pgd_offset(mm, address); |
| 881 | |
| 882 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
| 883 | return no_page_table(vma, flags); |
| 884 | |
| 885 | if (pgd_huge(*pgd)) { |
| 886 | page = follow_huge_pgd(mm, address, pgd, flags); |
| 887 | if (page) |
| 888 | return page; |
| 889 | return no_page_table(vma, flags); |
| 890 | } |
| 891 | if (is_hugepd(__hugepd(pgd_val(*pgd)))) { |
| 892 | page = follow_huge_pd(vma, address, |
| 893 | __hugepd(pgd_val(*pgd)), flags, |
| 894 | PGDIR_SHIFT); |
| 895 | if (page) |
| 896 | return page; |
| 897 | return no_page_table(vma, flags); |
| 898 | } |
| 899 | |
| 900 | return follow_p4d_mask(vma, address, pgd, flags, ctx); |
| 901 | } |
| 902 | |
| 903 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, |
| 904 | unsigned int foll_flags) |
| 905 | { |
| 906 | struct follow_page_context ctx = { NULL }; |
| 907 | struct page *page; |
| 908 | |
| 909 | if (vma_is_secretmem(vma)) |
| 910 | return NULL; |
| 911 | |
| 912 | if (foll_flags & FOLL_PIN) |
| 913 | return NULL; |
| 914 | |
| 915 | page = follow_page_mask(vma, address, foll_flags, &ctx); |
| 916 | if (ctx.pgmap) |
| 917 | put_dev_pagemap(ctx.pgmap); |
| 918 | return page; |
| 919 | } |
| 920 | |
| 921 | static int get_gate_page(struct mm_struct *mm, unsigned long address, |
| 922 | unsigned int gup_flags, struct vm_area_struct **vma, |
| 923 | struct page **page) |
| 924 | { |
| 925 | pgd_t *pgd; |
| 926 | p4d_t *p4d; |
| 927 | pud_t *pud; |
| 928 | pmd_t *pmd; |
| 929 | pte_t *pte; |
| 930 | int ret = -EFAULT; |
| 931 | |
| 932 | /* user gate pages are read-only */ |
| 933 | if (gup_flags & FOLL_WRITE) |
| 934 | return -EFAULT; |
| 935 | if (address > TASK_SIZE) |
| 936 | pgd = pgd_offset_k(address); |
| 937 | else |
| 938 | pgd = pgd_offset_gate(mm, address); |
| 939 | if (pgd_none(*pgd)) |
| 940 | return -EFAULT; |
| 941 | p4d = p4d_offset(pgd, address); |
| 942 | if (p4d_none(*p4d)) |
| 943 | return -EFAULT; |
| 944 | pud = pud_offset(p4d, address); |
| 945 | if (pud_none(*pud)) |
| 946 | return -EFAULT; |
| 947 | pmd = pmd_offset(pud, address); |
| 948 | if (!pmd_present(*pmd)) |
| 949 | return -EFAULT; |
| 950 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
| 951 | pte = pte_offset_map(pmd, address); |
| 952 | if (pte_none(*pte)) |
| 953 | goto unmap; |
| 954 | *vma = get_gate_vma(mm); |
| 955 | if (!page) |
| 956 | goto out; |
| 957 | *page = vm_normal_page(*vma, address, *pte); |
| 958 | if (!*page) { |
| 959 | if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) |
| 960 | goto unmap; |
| 961 | *page = pte_page(*pte); |
| 962 | } |
| 963 | if (unlikely(!try_grab_page(*page, gup_flags))) { |
| 964 | ret = -ENOMEM; |
| 965 | goto unmap; |
| 966 | } |
| 967 | out: |
| 968 | ret = 0; |
| 969 | unmap: |
| 970 | pte_unmap(pte); |
| 971 | return ret; |
| 972 | } |
| 973 | |
| 974 | /* |
| 975 | * mmap_lock must be held on entry. If @locked != NULL and *@flags |
| 976 | * does not include FOLL_NOWAIT, the mmap_lock may be released. If it |
| 977 | * is, *@locked will be set to 0 and -EBUSY returned. |
| 978 | */ |
| 979 | static int faultin_page(struct vm_area_struct *vma, |
| 980 | unsigned long address, unsigned int *flags, bool unshare, |
| 981 | int *locked) |
| 982 | { |
| 983 | unsigned int fault_flags = 0; |
| 984 | vm_fault_t ret; |
| 985 | |
| 986 | if (*flags & FOLL_NOFAULT) |
| 987 | return -EFAULT; |
| 988 | if (*flags & FOLL_WRITE) |
| 989 | fault_flags |= FAULT_FLAG_WRITE; |
| 990 | if (*flags & FOLL_REMOTE) |
| 991 | fault_flags |= FAULT_FLAG_REMOTE; |
| 992 | if (locked) |
| 993 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
| 994 | if (*flags & FOLL_NOWAIT) |
| 995 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; |
| 996 | if (*flags & FOLL_TRIED) { |
| 997 | /* |
| 998 | * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED |
| 999 | * can co-exist |
| 1000 | */ |
| 1001 | fault_flags |= FAULT_FLAG_TRIED; |
| 1002 | } |
| 1003 | if (unshare) { |
| 1004 | fault_flags |= FAULT_FLAG_UNSHARE; |
| 1005 | /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ |
| 1006 | VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE); |
| 1007 | } |
| 1008 | |
| 1009 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
| 1010 | |
| 1011 | if (ret & VM_FAULT_COMPLETED) { |
| 1012 | /* |
| 1013 | * With FAULT_FLAG_RETRY_NOWAIT we'll never release the |
| 1014 | * mmap lock in the page fault handler. Sanity check this. |
| 1015 | */ |
| 1016 | WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); |
| 1017 | if (locked) |
| 1018 | *locked = 0; |
| 1019 | /* |
| 1020 | * We should do the same as VM_FAULT_RETRY, but let's not |
| 1021 | * return -EBUSY since that's not reflecting the reality of |
| 1022 | * what has happened - we've just fully completed a page |
| 1023 | * fault, with the mmap lock released. Use -EAGAIN to show |
| 1024 | * that we want to take the mmap lock _again_. |
| 1025 | */ |
| 1026 | return -EAGAIN; |
| 1027 | } |
| 1028 | |
| 1029 | if (ret & VM_FAULT_ERROR) { |
| 1030 | int err = vm_fault_to_errno(ret, *flags); |
| 1031 | |
| 1032 | if (err) |
| 1033 | return err; |
| 1034 | BUG(); |
| 1035 | } |
| 1036 | |
| 1037 | if (ret & VM_FAULT_RETRY) { |
| 1038 | if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) |
| 1039 | *locked = 0; |
| 1040 | return -EBUSY; |
| 1041 | } |
| 1042 | |
| 1043 | return 0; |
| 1044 | } |
| 1045 | |
| 1046 | static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) |
| 1047 | { |
| 1048 | vm_flags_t vm_flags = vma->vm_flags; |
| 1049 | int write = (gup_flags & FOLL_WRITE); |
| 1050 | int foreign = (gup_flags & FOLL_REMOTE); |
| 1051 | |
| 1052 | if (vm_flags & (VM_IO | VM_PFNMAP)) |
| 1053 | return -EFAULT; |
| 1054 | |
| 1055 | if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) |
| 1056 | return -EFAULT; |
| 1057 | |
| 1058 | if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) |
| 1059 | return -EOPNOTSUPP; |
| 1060 | |
| 1061 | if (vma_is_secretmem(vma)) |
| 1062 | return -EFAULT; |
| 1063 | |
| 1064 | if (write) { |
| 1065 | if (!(vm_flags & VM_WRITE)) { |
| 1066 | if (!(gup_flags & FOLL_FORCE)) |
| 1067 | return -EFAULT; |
| 1068 | /* |
| 1069 | * We used to let the write,force case do COW in a |
| 1070 | * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could |
| 1071 | * set a breakpoint in a read-only mapping of an |
| 1072 | * executable, without corrupting the file (yet only |
| 1073 | * when that file had been opened for writing!). |
| 1074 | * Anon pages in shared mappings are surprising: now |
| 1075 | * just reject it. |
| 1076 | */ |
| 1077 | if (!is_cow_mapping(vm_flags)) |
| 1078 | return -EFAULT; |
| 1079 | } |
| 1080 | } else if (!(vm_flags & VM_READ)) { |
| 1081 | if (!(gup_flags & FOLL_FORCE)) |
| 1082 | return -EFAULT; |
| 1083 | /* |
| 1084 | * Is there actually any vma we can reach here which does not |
| 1085 | * have VM_MAYREAD set? |
| 1086 | */ |
| 1087 | if (!(vm_flags & VM_MAYREAD)) |
| 1088 | return -EFAULT; |
| 1089 | } |
| 1090 | /* |
| 1091 | * gups are always data accesses, not instruction |
| 1092 | * fetches, so execute=false here |
| 1093 | */ |
| 1094 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
| 1095 | return -EFAULT; |
| 1096 | return 0; |
| 1097 | } |
| 1098 | |
| 1099 | /** |
| 1100 | * __get_user_pages() - pin user pages in memory |
| 1101 | * @mm: mm_struct of target mm |
| 1102 | * @start: starting user address |
| 1103 | * @nr_pages: number of pages from start to pin |
| 1104 | * @gup_flags: flags modifying pin behaviour |
| 1105 | * @pages: array that receives pointers to the pages pinned. |
| 1106 | * Should be at least nr_pages long. Or NULL, if caller |
| 1107 | * only intends to ensure the pages are faulted in. |
| 1108 | * @vmas: array of pointers to vmas corresponding to each page. |
| 1109 | * Or NULL if the caller does not require them. |
| 1110 | * @locked: whether we're still with the mmap_lock held |
| 1111 | * |
| 1112 | * Returns either number of pages pinned (which may be less than the |
| 1113 | * number requested), or an error. Details about the return value: |
| 1114 | * |
| 1115 | * -- If nr_pages is 0, returns 0. |
| 1116 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. |
| 1117 | * -- If nr_pages is >0, and some pages were pinned, returns the number of |
| 1118 | * pages pinned. Again, this may be less than nr_pages. |
| 1119 | * -- 0 return value is possible when the fault would need to be retried. |
| 1120 | * |
| 1121 | * The caller is responsible for releasing returned @pages, via put_page(). |
| 1122 | * |
| 1123 | * @vmas are valid only as long as mmap_lock is held. |
| 1124 | * |
| 1125 | * Must be called with mmap_lock held. It may be released. See below. |
| 1126 | * |
| 1127 | * __get_user_pages walks a process's page tables and takes a reference to |
| 1128 | * each struct page that each user address corresponds to at a given |
| 1129 | * instant. That is, it takes the page that would be accessed if a user |
| 1130 | * thread accesses the given user virtual address at that instant. |
| 1131 | * |
| 1132 | * This does not guarantee that the page exists in the user mappings when |
| 1133 | * __get_user_pages returns, and there may even be a completely different |
| 1134 | * page there in some cases (e.g. if mmapped pagecache has been invalidated
| 1135 | * and subsequently re-faulted). However it does guarantee that the page
| 1136 | * won't be freed completely. And mostly callers simply care that the page |
| 1137 | * contains data that was valid *at some point in time*. Typically, an IO |
| 1138 | * or similar operation cannot guarantee anything stronger anyway because |
| 1139 | * locks can't be held over the syscall boundary. |
| 1140 | * |
| 1141 | * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If |
| 1142 | * the page is written to, set_page_dirty (or set_page_dirty_lock, as |
| 1143 | * appropriate) must be called after the page is finished with, and |
| 1144 | * before put_page is called. |
| 1145 | * |
| 1146 | * If @locked != NULL, *@locked will be set to 0 when mmap_lock is |
| 1147 | * released by an up_read(). That can happen if @gup_flags does not |
| 1148 | * have FOLL_NOWAIT. |
| 1149 | * |
| 1150 | * A caller using such a combination of @locked and @gup_flags |
| 1151 | * must therefore hold the mmap_lock for reading only, and recognize |
| 1152 | * when it's been released. Otherwise, it must be held for either |
| 1153 | * reading or writing and will not be released. |
| 1154 | * |
| 1155 | * In most cases, get_user_pages or get_user_pages_fast should be used |
| 1156 | * instead of __get_user_pages. __get_user_pages should be used only if |
| 1157 | * you need some special @gup_flags. |
| 1158 | */ |
| 1159 | static long __get_user_pages(struct mm_struct *mm, |
| 1160 | unsigned long start, unsigned long nr_pages, |
| 1161 | unsigned int gup_flags, struct page **pages, |
| 1162 | struct vm_area_struct **vmas, int *locked) |
| 1163 | { |
| 1164 | long ret = 0, i = 0; |
| 1165 | struct vm_area_struct *vma = NULL; |
| 1166 | struct follow_page_context ctx = { NULL }; |
| 1167 | |
| 1168 | if (!nr_pages) |
| 1169 | return 0; |
| 1170 | |
| 1171 | start = untagged_addr(start); |
| 1172 | |
| 1173 | VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); |
| 1174 | |
| 1175 | do { |
| 1176 | struct page *page; |
| 1177 | unsigned int foll_flags = gup_flags; |
| 1178 | unsigned int page_increm; |
| 1179 | |
| 1180 | /* first iteration or cross vma bound */ |
| 1181 | if (!vma || start >= vma->vm_end) { |
| 1182 | vma = find_extend_vma(mm, start); |
| 1183 | if (!vma && in_gate_area(mm, start)) { |
| 1184 | ret = get_gate_page(mm, start & PAGE_MASK, |
| 1185 | gup_flags, &vma, |
| 1186 | pages ? &pages[i] : NULL); |
| 1187 | if (ret) |
| 1188 | goto out; |
| 1189 | ctx.page_mask = 0; |
| 1190 | goto next_page; |
| 1191 | } |
| 1192 | |
| 1193 | if (!vma) { |
| 1194 | ret = -EFAULT; |
| 1195 | goto out; |
| 1196 | } |
| 1197 | ret = check_vma_flags(vma, gup_flags); |
| 1198 | if (ret) |
| 1199 | goto out; |
| 1200 | |
| 1201 | if (is_vm_hugetlb_page(vma)) { |
| 1202 | i = follow_hugetlb_page(mm, vma, pages, vmas, |
| 1203 | &start, &nr_pages, i, |
| 1204 | gup_flags, locked); |
| 1205 | if (locked && *locked == 0) { |
| 1206 | /* |
| 1207 | * We've got a VM_FAULT_RETRY |
| 1208 | * and we've lost mmap_lock. |
| 1209 | * We must stop here. |
| 1210 | */ |
| 1211 | BUG_ON(gup_flags & FOLL_NOWAIT); |
| 1212 | goto out; |
| 1213 | } |
| 1214 | continue; |
| 1215 | } |
| 1216 | } |
| 1217 | retry: |
| 1218 | /* |
| 1219 | * If we have a pending SIGKILL, don't keep faulting pages and |
| 1220 | * potentially allocating memory. |
| 1221 | */ |
| 1222 | if (fatal_signal_pending(current)) { |
| 1223 | ret = -EINTR; |
| 1224 | goto out; |
| 1225 | } |
| 1226 | cond_resched(); |
| 1227 | |
| 1228 | page = follow_page_mask(vma, start, foll_flags, &ctx); |
| 1229 | if (!page || PTR_ERR(page) == -EMLINK) { |
| 1230 | ret = faultin_page(vma, start, &foll_flags, |
| 1231 | PTR_ERR(page) == -EMLINK, locked); |
| 1232 | switch (ret) { |
| 1233 | case 0: |
| 1234 | goto retry; |
| 1235 | case -EBUSY: |
| 1236 | case -EAGAIN: |
| 1237 | ret = 0; |
| 1238 | fallthrough; |
| 1239 | case -EFAULT: |
| 1240 | case -ENOMEM: |
| 1241 | case -EHWPOISON: |
| 1242 | goto out; |
| 1243 | } |
| 1244 | BUG(); |
| 1245 | } else if (PTR_ERR(page) == -EEXIST) { |
| 1246 | /* |
| 1247 | * Proper page table entry exists, but no corresponding |
| 1248 | * struct page. If the caller expects **pages to be |
| 1249 | * filled in, bail out now, because that can't be done |
| 1250 | * for this page. |
| 1251 | */ |
| 1252 | if (pages) { |
| 1253 | ret = PTR_ERR(page); |
| 1254 | goto out; |
| 1255 | } |
| 1256 | |
| 1257 | goto next_page; |
| 1258 | } else if (IS_ERR(page)) { |
| 1259 | ret = PTR_ERR(page); |
| 1260 | goto out; |
| 1261 | } |
| 1262 | if (pages) { |
| 1263 | pages[i] = page; |
| 1264 | flush_anon_page(vma, page, start); |
| 1265 | flush_dcache_page(page); |
| 1266 | ctx.page_mask = 0; |
| 1267 | } |
| 1268 | next_page: |
| 1269 | if (vmas) { |
| 1270 | vmas[i] = vma; |
| 1271 | ctx.page_mask = 0; |
| 1272 | } |
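| | /*
| | * ctx.page_mask is non-zero only when neither a pages[] nor a vmas[]
| | * array is being filled and follow_page_mask() returned a huge mapping
| | * (e.g. HPAGE_PMD_NR - 1 for a PMD-mapped THP); the expression below
| | * then yields the number of pages from @start to the end of that huge
| | * page, so the loop can step over it at once.
| | */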
| 1273 | page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); |
| 1274 | if (page_increm > nr_pages) |
| 1275 | page_increm = nr_pages; |
| 1276 | i += page_increm; |
| 1277 | start += page_increm * PAGE_SIZE; |
| 1278 | nr_pages -= page_increm; |
| 1279 | } while (nr_pages); |
| 1280 | out: |
| 1281 | if (ctx.pgmap) |
| 1282 | put_dev_pagemap(ctx.pgmap); |
| 1283 | return i ? i : ret; |
| 1284 | } |
| 1285 | |
| 1286 | static bool vma_permits_fault(struct vm_area_struct *vma, |
| 1287 | unsigned int fault_flags) |
| 1288 | { |
| 1289 | bool write = !!(fault_flags & FAULT_FLAG_WRITE); |
| 1290 | bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); |
| 1291 | vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; |
| 1292 | |
| 1293 | if (!(vm_flags & vma->vm_flags)) |
| 1294 | return false; |
| 1295 | |
| 1296 | /* |
| 1297 | * The architecture might have a hardware protection |
| 1298 | * mechanism other than read/write that can deny access. |
| 1299 | * |
| 1300 | * gup always represents data access, not instruction |
| 1301 | * fetches, so execute=false here: |
| 1302 | */ |
| 1303 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
| 1304 | return false; |
| 1305 | |
| 1306 | return true; |
| 1307 | } |
| 1308 | |
| 1309 | /** |
| 1310 | * fixup_user_fault() - manually resolve a user page fault |
| 1311 | * @mm: mm_struct of target mm |
| 1312 | * @address: user address |
| 1313 | * @fault_flags: flags to pass down to handle_mm_fault()
| 1314 | * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller |
| 1315 | * does not allow retry. If NULL, the caller must guarantee |
| 1316 | * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. |
| 1317 | * |
| 1318 | * This is meant to be called in the specific scenario where for locking reasons |
| 1319 | * we try to access user memory in atomic context (within a pagefault_disable() |
| 1320 | * section), this returns -EFAULT, and we want to resolve the user fault before |
| 1321 | * trying again. |
| 1322 | * |
| 1323 | * Typically this is meant to be used by the futex code. |
| 1324 | * |
| 1325 | * The main difference with get_user_pages() is that this function will |
| 1326 | * unconditionally call handle_mm_fault() which will in turn perform all the |
| 1327 | * necessary SW fixup of the dirty and young bits in the PTE, while |
| 1328 | * get_user_pages() only guarantees to update these in the struct page. |
| 1329 | * |
| 1330 | * This is important for some architectures where those bits also gate the |
| 1331 | * access permission to the page because they are maintained in software. On |
| 1332 | * such architectures, gup() will not be enough to make a subsequent access |
| 1333 | * succeed. |
| 1334 | * |
| 1335 | * This function will not return with an unlocked mmap_lock. So it does not
| 1336 | * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
| 1337 | */ |
| 1338 | int fixup_user_fault(struct mm_struct *mm, |
| 1339 | unsigned long address, unsigned int fault_flags, |
| 1340 | bool *unlocked) |
| 1341 | { |
| 1342 | struct vm_area_struct *vma; |
| 1343 | vm_fault_t ret; |
| 1344 | |
| 1345 | address = untagged_addr(address); |
| 1346 | |
| 1347 | if (unlocked) |
| 1348 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
| 1349 | |
| 1350 | retry: |
| 1351 | vma = find_extend_vma(mm, address); |
| 1352 | if (!vma || address < vma->vm_start) |
| 1353 | return -EFAULT; |
| 1354 | |
| 1355 | if (!vma_permits_fault(vma, fault_flags)) |
| 1356 | return -EFAULT; |
| 1357 | |
| 1358 | if ((fault_flags & FAULT_FLAG_KILLABLE) && |
| 1359 | fatal_signal_pending(current)) |
| 1360 | return -EINTR; |
| 1361 | |
| 1362 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
| 1363 | |
| 1364 | if (ret & VM_FAULT_COMPLETED) { |
| 1365 | /* |
| 1366 | * NOTE: it's a pity that we need to retake the lock here |
| 1367 | * to pair with the unlock() in the callers. Ideally we |
| 1368 | * could tell the callers so they do not need to unlock. |
| 1369 | */ |
| 1370 | mmap_read_lock(mm); |
| 1371 | *unlocked = true; |
| 1372 | return 0; |
| 1373 | } |
| 1374 | |
| 1375 | if (ret & VM_FAULT_ERROR) { |
| 1376 | int err = vm_fault_to_errno(ret, 0); |
| 1377 | |
| 1378 | if (err) |
| 1379 | return err; |
| 1380 | BUG(); |
| 1381 | } |
| 1382 | |
| 1383 | if (ret & VM_FAULT_RETRY) { |
| 1384 | mmap_read_lock(mm); |
| 1385 | *unlocked = true; |
| 1386 | fault_flags |= FAULT_FLAG_TRIED; |
| 1387 | goto retry; |
| 1388 | } |
| 1389 | |
| 1390 | return 0; |
| 1391 | } |
| 1392 | EXPORT_SYMBOL_GPL(fixup_user_fault); |
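| | 
| | /*
| | * Sketch of the retry pattern this helper enables (assumed caller code, not
| | * taken from this file): a futex-style user access done under
| | * pagefault_disable() that failed with -EFAULT can be resolved with
| | *
| | *	mmap_read_lock(mm);
| | *	ret = fixup_user_fault(mm, uaddr, FAULT_FLAG_WRITE, &unlocked);
| | *	mmap_read_unlock(mm);
| | *
| | * and then retried.
| | */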
| 1393 | |
| 1394 | /* |
| 1395 | * Please note that this function, unlike __get_user_pages(), will not
| 1396 | * return 0 for nr_pages > 0 without FOLL_NOWAIT.
| 1397 | */ |
| 1398 | static __always_inline long __get_user_pages_locked(struct mm_struct *mm, |
| 1399 | unsigned long start, |
| 1400 | unsigned long nr_pages, |
| 1401 | struct page **pages, |
| 1402 | struct vm_area_struct **vmas, |
| 1403 | int *locked, |
| 1404 | unsigned int flags) |
| 1405 | { |
| 1406 | long ret, pages_done; |
| 1407 | bool lock_dropped; |
| 1408 | |
| 1409 | if (locked) { |
| 1410 | /* if VM_FAULT_RETRY can be returned, vmas become invalid */ |
| 1411 | BUG_ON(vmas); |
| 1412 | /* check caller initialized locked */ |
| 1413 | BUG_ON(*locked != 1); |
| 1414 | } |
| 1415 | |
| 1416 | if (flags & FOLL_PIN) |
| 1417 | mm_set_has_pinned_flag(&mm->flags); |
| 1418 | |
| 1419 | /* |
| 1420 | * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior |
| 1421 | * is to set FOLL_GET if the caller wants pages[] filled in (but has |
| 1422 | * carelessly failed to specify FOLL_GET), so keep doing that, but only |
| 1423 | * for FOLL_GET, not for the newer FOLL_PIN. |
| 1424 | * |
| 1425 | * FOLL_PIN always expects pages to be non-null, but no need to assert |
| 1426 | * that here, as any failures will be obvious enough. |
| 1427 | */ |
| 1428 | if (pages && !(flags & FOLL_PIN)) |
| 1429 | flags |= FOLL_GET; |
| 1430 | |
| 1431 | pages_done = 0; |
| 1432 | lock_dropped = false; |
| 1433 | for (;;) { |
| 1434 | ret = __get_user_pages(mm, start, nr_pages, flags, pages, |
| 1435 | vmas, locked); |
| 1436 | if (!locked) |
| 1437 | /* VM_FAULT_RETRY couldn't trigger, bypass */ |
| 1438 | return ret; |
| 1439 | |
| 1440 | /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ |
| 1441 | if (!*locked) { |
| 1442 | BUG_ON(ret < 0); |
| 1443 | BUG_ON(ret >= nr_pages); |
| 1444 | } |
| 1445 | |
| 1446 | if (ret > 0) { |
| 1447 | nr_pages -= ret; |
| 1448 | pages_done += ret; |
| 1449 | if (!nr_pages) |
| 1450 | break; |
| 1451 | } |
| 1452 | if (*locked) { |
| 1453 | /* |
| 1454 | * VM_FAULT_RETRY didn't trigger or it was a |
| 1455 | * FOLL_NOWAIT. |
| 1456 | */ |
| 1457 | if (!pages_done) |
| 1458 | pages_done = ret; |
| 1459 | break; |
| 1460 | } |
| 1461 | /* |
| 1462 | * VM_FAULT_RETRY triggered, so seek to the faulting offset. |
| 1463 | * For the prefault case (!pages) we only update counts. |
| 1464 | */ |
| 1465 | if (likely(pages)) |
| 1466 | pages += ret; |
| 1467 | start += ret << PAGE_SHIFT; |
| 1468 | lock_dropped = true; |
| 1469 | |
| 1470 | retry: |
| 1471 | /* |
| 1472 | * Repeat on the address that fired VM_FAULT_RETRY |
| 1473 | * with both FAULT_FLAG_ALLOW_RETRY and |
| 1474 | * FAULT_FLAG_TRIED. Note that GUP can be interrupted |
| 1475 | * by fatal signals, so we need to check it before we |
| 1476 | * start trying again otherwise it can loop forever. |
| 1477 | */ |
| 1478 | |
| 1479 | if (fatal_signal_pending(current)) { |
| 1480 | if (!pages_done) |
| 1481 | pages_done = -EINTR; |
| 1482 | break; |
| 1483 | } |
| 1484 | |
| 1485 | ret = mmap_read_lock_killable(mm); |
| 1486 | if (ret) { |
| 1487 | BUG_ON(ret > 0); |
| 1488 | if (!pages_done) |
| 1489 | pages_done = ret; |
| 1490 | break; |
| 1491 | } |
| 1492 | |
| 1493 | *locked = 1; |
| 1494 | ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, |
| 1495 | pages, NULL, locked); |
| 1496 | if (!*locked) { |
| 1497 | /* Continue to retry until we succeed */
| 1498 | BUG_ON(ret != 0); |
| 1499 | goto retry; |
| 1500 | } |
| 1501 | if (ret != 1) { |
| 1502 | BUG_ON(ret > 1); |
| 1503 | if (!pages_done) |
| 1504 | pages_done = ret; |
| 1505 | break; |
| 1506 | } |
| 1507 | nr_pages--; |
| 1508 | pages_done++; |
| 1509 | if (!nr_pages) |
| 1510 | break; |
| 1511 | if (likely(pages)) |
| 1512 | pages++; |
| 1513 | start += PAGE_SIZE; |
| 1514 | } |
| 1515 | if (lock_dropped && *locked) { |
| 1516 | /* |
| 1517 | * We must let the caller know we temporarily dropped the lock |
| 1518 | * and so the critical section protected by it was lost. |
| 1519 | */ |
| 1520 | mmap_read_unlock(mm); |
| 1521 | *locked = 0; |
| 1522 | } |
| 1523 | return pages_done; |
| 1524 | } |
| 1525 | |
| 1526 | /** |
| 1527 | * populate_vma_page_range() - populate a range of pages in the vma. |
| 1528 | * @vma: target vma |
| 1529 | * @start: start address |
| 1530 | * @end: end address |
| 1531 | * @locked: whether the mmap_lock is still held |
| 1532 | * |
| 1533 | * This takes care of mlocking the pages too if VM_LOCKED is set. |
| 1534 | * |
| 1535 | * Return either number of pages pinned in the vma, or a negative error |
| 1536 | * code on error. |
| 1537 | * |
| 1538 | * vma->vm_mm->mmap_lock must be held. |
| 1539 | * |
| 1540 | * If @locked is NULL, it may be held for read or write and will |
| 1541 | * be unperturbed. |
| 1542 | * |
| 1543 | * If @locked is non-NULL, it must be held for read only and may be
| 1544 | * released. If it's released, *@locked will be set to 0. |
| 1545 | */ |
| 1546 | long populate_vma_page_range(struct vm_area_struct *vma, |
| 1547 | unsigned long start, unsigned long end, int *locked) |
| 1548 | { |
| 1549 | struct mm_struct *mm = vma->vm_mm; |
| 1550 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
| 1551 | int gup_flags; |
| 1552 | long ret; |
| 1553 | |
| 1554 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
| 1555 | VM_BUG_ON(!PAGE_ALIGNED(end)); |
| 1556 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
| 1557 | VM_BUG_ON_VMA(end > vma->vm_end, vma); |
| 1558 | mmap_assert_locked(mm); |
| 1559 | |
| 1560 | /* |
| 1561 | * Rightly or wrongly, the VM_LOCKONFAULT case has never used |
| 1562 | * faultin_page() to break COW, so it has no work to do here. |
| 1563 | */ |
| 1564 | if (vma->vm_flags & VM_LOCKONFAULT) |
| 1565 | return nr_pages; |
| 1566 | |
| 1567 | gup_flags = FOLL_TOUCH; |
| 1568 | /* |
| 1569 | * We want to touch writable mappings with a write fault in order |
| 1570 | * to break COW, except for shared mappings because these don't COW |
| 1571 | * and we would not want to dirty them for nothing. |
| 1572 | */ |
| 1573 | if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) |
| 1574 | gup_flags |= FOLL_WRITE; |
| 1575 | |
| 1576 | /* |
| 1577 | * We want mlock to succeed for regions that have any permissions |
| 1578 | * other than PROT_NONE. |
| 1579 | */ |
| 1580 | if (vma_is_accessible(vma)) |
| 1581 | gup_flags |= FOLL_FORCE; |
| 1582 | |
| 1583 | /* |
| 1584 | * We made sure addr is within a VMA, so the following will |
| 1585 | * not result in a stack expansion that recurses back here. |
| 1586 | */ |
| 1587 | ret = __get_user_pages(mm, start, nr_pages, gup_flags, |
| 1588 | NULL, NULL, locked); |
| 1589 | lru_add_drain(); |
| 1590 | return ret; |
| 1591 | } |
| 1592 | |
| 1593 | /* |
| 1594 | * faultin_vma_page_range() - populate (prefault) page tables inside the |
| 1595 | * given VMA range readable/writable |
| 1596 | * |
| 1597 | * This takes care of mlocking the pages, too, if VM_LOCKED is set. |
| 1598 | * |
| 1599 | * @vma: target vma |
| 1600 | * @start: start address |
| 1601 | * @end: end address |
| 1602 | * @write: whether to prefault readable or writable |
| 1603 | * @locked: whether the mmap_lock is still held |
| 1604 | * |
| 1605 |  * Returns either the number of processed pages in the vma, or a negative error
| 1606 | * code on error (see __get_user_pages()). |
| 1607 | * |
| 1608 | * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and |
| 1609 | * covered by the VMA. |
| 1610 | * |
| 1611 | * If @locked is NULL, it may be held for read or write and will be unperturbed. |
| 1612 | * |
| 1613 |  * If @locked is non-NULL, it must be held for read only and may be released. If
| 1614 | * it's released, *@locked will be set to 0. |
| 1615 | */ |
| 1616 | long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, |
| 1617 | unsigned long end, bool write, int *locked) |
| 1618 | { |
| 1619 | struct mm_struct *mm = vma->vm_mm; |
| 1620 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
| 1621 | int gup_flags; |
| 1622 | long ret; |
| 1623 | |
| 1624 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
| 1625 | VM_BUG_ON(!PAGE_ALIGNED(end)); |
| 1626 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
| 1627 | VM_BUG_ON_VMA(end > vma->vm_end, vma); |
| 1628 | mmap_assert_locked(mm); |
| 1629 | |
| 1630 | /* |
| 1631 | * FOLL_TOUCH: Mark page accessed and thereby young; will also mark |
| 1632 | * the page dirty with FOLL_WRITE -- which doesn't make a |
| 1633 | * difference with !FOLL_FORCE, because the page is writable |
| 1634 | * in the page table. |
| 1635 | * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit |
| 1636 | * a poisoned page. |
| 1637 | * !FOLL_FORCE: Require proper access permissions. |
| 1638 | */ |
| 1639 | gup_flags = FOLL_TOUCH | FOLL_HWPOISON; |
| 1640 | if (write) |
| 1641 | gup_flags |= FOLL_WRITE; |
| 1642 | |
| 1643 | /* |
| 1644 | * We want to report -EINVAL instead of -EFAULT for any permission |
| 1645 | * problems or incompatible mappings. |
| 1646 | */ |
| 1647 | if (check_vma_flags(vma, gup_flags)) |
| 1648 | return -EINVAL; |
| 1649 | |
| 1650 | ret = __get_user_pages(mm, start, nr_pages, gup_flags, |
| 1651 | NULL, NULL, locked); |
| 1652 | lru_add_drain(); |
| 1653 | return ret; |
| 1654 | } |
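
/*
 * Illustrative sketch only: a prefaulting caller (an MADV_POPULATE-style
 * path, say) clamps the request to one VMA and calls this under the mmap
 * read lock, roughly as follows; the surrounding loop and error handling
 * are omitted:
 *
 *	unsigned long tmp_end = min(end, vma->vm_end);
 *	int locked = 1;
 *	long pages;
 *
 *	mmap_read_lock(mm);
 *	pages = faultin_vma_page_range(vma, start, tmp_end, write, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 */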
| 1655 | |
| 1656 | /* |
| 1657 | * __mm_populate - populate and/or mlock pages within a range of address space. |
| 1658 | * |
| 1659 | * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap |
| 1660 | * flags. VMAs must be already marked with the desired vm_flags, and |
| 1661 | * mmap_lock must not be held. |
| 1662 | */ |
| 1663 | int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) |
| 1664 | { |
| 1665 | struct mm_struct *mm = current->mm; |
| 1666 | unsigned long end, nstart, nend; |
| 1667 | struct vm_area_struct *vma = NULL; |
| 1668 | int locked = 0; |
| 1669 | long ret = 0; |
| 1670 | |
| 1671 | end = start + len; |
| 1672 | |
| 1673 | for (nstart = start; nstart < end; nstart = nend) { |
| 1674 | /* |
| 1675 | * We want to fault in pages for [nstart; end) address range. |
| 1676 | * Find first corresponding VMA. |
| 1677 | */ |
| 1678 | if (!locked) { |
| 1679 | locked = 1; |
| 1680 | mmap_read_lock(mm); |
| 1681 | vma = find_vma_intersection(mm, nstart, end); |
| 1682 | } else if (nstart >= vma->vm_end) |
| 1683 | vma = find_vma_intersection(mm, vma->vm_end, end); |
| 1684 | |
| 1685 | if (!vma) |
| 1686 | break; |
| 1687 | /* |
| 1688 | * Set [nstart; nend) to intersection of desired address |
| 1689 | * range with the first VMA. Also, skip undesirable VMA types. |
| 1690 | */ |
| 1691 | nend = min(end, vma->vm_end); |
| 1692 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) |
| 1693 | continue; |
| 1694 | if (nstart < vma->vm_start) |
| 1695 | nstart = vma->vm_start; |
| 1696 | /* |
| 1697 | * Now fault in a range of pages. populate_vma_page_range() |
| 1698 | * double checks the vma flags, so that it won't mlock pages |
| 1699 | * if the vma was already munlocked. |
| 1700 | */ |
| 1701 | ret = populate_vma_page_range(vma, nstart, nend, &locked); |
| 1702 | if (ret < 0) { |
| 1703 | if (ignore_errors) { |
| 1704 | ret = 0; |
| 1705 | continue; /* continue at next VMA */ |
| 1706 | } |
| 1707 | break; |
| 1708 | } |
| 1709 | nend = nstart + ret * PAGE_SIZE; |
| 1710 | ret = 0; |
| 1711 | } |
| 1712 | if (locked) |
| 1713 | mmap_read_unlock(mm); |
| 1714 | return ret; /* 0 or negative error code */ |
| 1715 | } |
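
/*
 * Illustrative sketch only (simplified, not the verbatim callers): mlock()
 * and MAP_POPULATE reach this function after the VMA flags have been set up
 * and the mmap_lock has been dropped, e.g.
 *
 *	mmap_write_unlock(mm);
 *	ret = __mm_populate(start, len, 0);
 *
 * A non-zero @ignore_errors instead makes the walk skip VMAs that fail to
 * populate rather than returning the error.
 */
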
| 1716 | #else /* CONFIG_MMU */ |
| 1717 | static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, |
| 1718 | unsigned long nr_pages, struct page **pages, |
| 1719 | struct vm_area_struct **vmas, int *locked, |
| 1720 | unsigned int foll_flags) |
| 1721 | { |
| 1722 | struct vm_area_struct *vma; |
| 1723 | unsigned long vm_flags; |
| 1724 | long i; |
| 1725 | |
| 1726 | /* calculate required read or write permissions. |
| 1727 | * If FOLL_FORCE is set, we only require the "MAY" flags. |
| 1728 | */ |
| 1729 | vm_flags = (foll_flags & FOLL_WRITE) ? |
| 1730 | (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); |
| 1731 | vm_flags &= (foll_flags & FOLL_FORCE) ? |
| 1732 | (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); |
| 1733 | |
| 1734 | for (i = 0; i < nr_pages; i++) { |
| 1735 | vma = find_vma(mm, start); |
| 1736 | if (!vma) |
| 1737 | goto finish_or_fault; |
| 1738 | |
| 1739 | /* protect what we can, including chardevs */ |
| 1740 | if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || |
| 1741 | !(vm_flags & vma->vm_flags)) |
| 1742 | goto finish_or_fault; |
| 1743 | |
| 1744 | if (pages) { |
| 1745 | pages[i] = virt_to_page((void *)start); |
| 1746 | if (pages[i]) |
| 1747 | get_page(pages[i]); |
| 1748 | } |
| 1749 | if (vmas) |
| 1750 | vmas[i] = vma; |
| 1751 | start = (start + PAGE_SIZE) & PAGE_MASK; |
| 1752 | } |
| 1753 | |
| 1754 | return i; |
| 1755 | |
| 1756 | finish_or_fault: |
| 1757 | return i ? : -EFAULT; |
| 1758 | } |
| 1759 | #endif /* !CONFIG_MMU */ |
| 1760 | |
| 1761 | /** |
| 1762 | * fault_in_writeable - fault in userspace address range for writing |
| 1763 | * @uaddr: start of address range |
| 1764 | * @size: size of address range |
| 1765 | * |
| 1766 | * Returns the number of bytes not faulted in (like copy_to_user() and |
| 1767 | * copy_from_user()). |
| 1768 | */ |
| 1769 | size_t fault_in_writeable(char __user *uaddr, size_t size) |
| 1770 | { |
| 1771 | char __user *start = uaddr, *end; |
| 1772 | |
| 1773 | if (unlikely(size == 0)) |
| 1774 | return 0; |
| 1775 | if (!user_write_access_begin(uaddr, size)) |
| 1776 | return size; |
| 1777 | if (!PAGE_ALIGNED(uaddr)) { |
| 1778 | unsafe_put_user(0, uaddr, out); |
| 1779 | uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); |
| 1780 | } |
| 1781 | end = (char __user *)PAGE_ALIGN((unsigned long)start + size); |
| 1782 | if (unlikely(end < start)) |
| 1783 | end = NULL; |
| 1784 | while (uaddr != end) { |
| 1785 | unsafe_put_user(0, uaddr, out); |
| 1786 | uaddr += PAGE_SIZE; |
| 1787 | } |
| 1788 | |
| 1789 | out: |
| 1790 | user_write_access_end(); |
| 1791 | if (size > uaddr - start) |
| 1792 | return size - (uaddr - start); |
| 1793 | return 0; |
| 1794 | } |
| 1795 | EXPORT_SYMBOL(fault_in_writeable); |
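
/*
 * Illustrative sketch only: callers typically pair fault_in_writeable()
 * with a copy loop that runs with page faults disabled and retries after
 * faulting the destination back in.  do_copy_nofault() is a stand-in for
 * the caller's faults-disabled copy routine, not a real kernel API:
 *
 *	while (count) {
 *		if (fault_in_writeable(ubuf, count) == count)
 *			return -EFAULT;		// nothing could be faulted in
 *		copied = do_copy_nofault(ubuf, kbuf, count);
 *		ubuf += copied;
 *		kbuf += copied;
 *		count -= copied;
 *	}
 */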
| 1796 | |
| 1797 | /** |
| 1798 | * fault_in_subpage_writeable - fault in an address range for writing |
| 1799 | * @uaddr: start of address range |
| 1800 | * @size: size of address range |
| 1801 | * |
| 1802 | * Fault in a user address range for writing while checking for permissions at |
| 1803 | * sub-page granularity (e.g. arm64 MTE). This function should be used when |
| 1804 | * the caller cannot guarantee forward progress of a copy_to_user() loop. |
| 1805 | * |
| 1806 | * Returns the number of bytes not faulted in (like copy_to_user() and |
| 1807 | * copy_from_user()). |
| 1808 | */ |
| 1809 | size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) |
| 1810 | { |
| 1811 | size_t faulted_in; |
| 1812 | |
| 1813 | /* |
| 1814 | * Attempt faulting in at page granularity first for page table |
| 1815 | * permission checking. The arch-specific probe_subpage_writeable() |
| 1816 | * functions may not check for this. |
| 1817 | */ |
| 1818 | faulted_in = size - fault_in_writeable(uaddr, size); |
| 1819 | if (faulted_in) |
| 1820 | faulted_in -= probe_subpage_writeable(uaddr, faulted_in); |
| 1821 | |
| 1822 | return size - faulted_in; |
| 1823 | } |
| 1824 | EXPORT_SYMBOL(fault_in_subpage_writeable); |
| 1825 | |
| 1826 | /* |
| 1827 | * fault_in_safe_writeable - fault in an address range for writing |
| 1828 | * @uaddr: start of address range |
| 1829 | * @size: length of address range |
| 1830 | * |
| 1831 | * Faults in an address range for writing. This is primarily useful when we |
| 1832 | * already know that some or all of the pages in the address range aren't in |
| 1833 | * memory. |
| 1834 | * |
| 1835 | * Unlike fault_in_writeable(), this function is non-destructive. |
| 1836 | * |
| 1837 | * Note that we don't pin or otherwise hold the pages referenced that we fault |
| 1838 | * in. There's no guarantee that they'll stay in memory for any duration of |
| 1839 | * time. |
| 1840 | * |
| 1841 | * Returns the number of bytes not faulted in, like copy_to_user() and |
| 1842 | * copy_from_user(). |
| 1843 | */ |
| 1844 | size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) |
| 1845 | { |
| 1846 | unsigned long start = (unsigned long)uaddr, end; |
| 1847 | struct mm_struct *mm = current->mm; |
| 1848 | bool unlocked = false; |
| 1849 | |
| 1850 | if (unlikely(size == 0)) |
| 1851 | return 0; |
| 1852 | end = PAGE_ALIGN(start + size); |
| 1853 | if (end < start) |
| 1854 | end = 0; |
| 1855 | |
| 1856 | mmap_read_lock(mm); |
| 1857 | do { |
| 1858 | if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) |
| 1859 | break; |
| 1860 | start = (start + PAGE_SIZE) & PAGE_MASK; |
| 1861 | } while (start != end); |
| 1862 | mmap_read_unlock(mm); |
| 1863 | |
| 1864 | if (size > start - (unsigned long)uaddr)
| 1865 | return size - (start - (unsigned long)uaddr);
| 1866 | return 0; |
| 1867 | } |
| 1868 | EXPORT_SYMBOL(fault_in_safe_writeable); |
| 1869 | |
| 1870 | /** |
| 1871 | * fault_in_readable - fault in userspace address range for reading |
| 1872 | * @uaddr: start of user address range |
| 1873 | * @size: size of user address range |
| 1874 | * |
| 1875 | * Returns the number of bytes not faulted in (like copy_to_user() and |
| 1876 | * copy_from_user()). |
| 1877 | */ |
| 1878 | size_t fault_in_readable(const char __user *uaddr, size_t size) |
| 1879 | { |
| 1880 | const char __user *start = uaddr, *end; |
| 1881 | volatile char c; |
| 1882 | |
| 1883 | if (unlikely(size == 0)) |
| 1884 | return 0; |
| 1885 | if (!user_read_access_begin(uaddr, size)) |
| 1886 | return size; |
| 1887 | if (!PAGE_ALIGNED(uaddr)) { |
| 1888 | unsafe_get_user(c, uaddr, out); |
| 1889 | uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); |
| 1890 | } |
| 1891 | end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); |
| 1892 | if (unlikely(end < start)) |
| 1893 | end = NULL; |
| 1894 | while (uaddr != end) { |
| 1895 | unsafe_get_user(c, uaddr, out); |
| 1896 | uaddr += PAGE_SIZE; |
| 1897 | } |
| 1898 | |
| 1899 | out: |
| 1900 | user_read_access_end(); |
| 1901 | (void)c; |
| 1902 | if (size > uaddr - start) |
| 1903 | return size - (uaddr - start); |
| 1904 | return 0; |
| 1905 | } |
| 1906 | EXPORT_SYMBOL(fault_in_readable); |
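
/*
 * Illustrative sketch only (helper name is made up): buffered-write style
 * callers check that the source is resident before taking page locks, then
 * copy with faults disabled and retry on a short copy:
 *
 *	while (bytes) {
 *		if (fault_in_readable(ubuf, bytes) == bytes)
 *			return -EFAULT;
 *		copied = my_copy_from_user_nofault(dst, ubuf, bytes);
 *		if (!copied)
 *			continue;	// fault the source in again and retry
 *		ubuf += copied;
 *		dst += copied;
 *		bytes -= copied;
 *	}
 */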
| 1907 | |
| 1908 | /** |
| 1909 | * get_dump_page() - pin user page in memory while writing it to core dump |
| 1910 | * @addr: user address |
| 1911 | * |
| 1912 | * Returns struct page pointer of user page pinned for dump, |
| 1913 | * to be freed afterwards by put_page(). |
| 1914 | * |
| 1915 | * Returns NULL on any kind of failure - a hole must then be inserted into |
| 1916 | * the corefile, to preserve alignment with its headers; and also returns |
| 1917 | * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - |
| 1918 | * allowing a hole to be left in the corefile to save disk space. |
| 1919 | * |
| 1920 | * Called without mmap_lock (takes and releases the mmap_lock by itself). |
| 1921 | */ |
| 1922 | #ifdef CONFIG_ELF_CORE |
| 1923 | struct page *get_dump_page(unsigned long addr) |
| 1924 | { |
| 1925 | struct mm_struct *mm = current->mm; |
| 1926 | struct page *page; |
| 1927 | int locked = 1; |
| 1928 | int ret; |
| 1929 | |
| 1930 | if (mmap_read_lock_killable(mm)) |
| 1931 | return NULL; |
| 1932 | ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked, |
| 1933 | FOLL_FORCE | FOLL_DUMP | FOLL_GET); |
| 1934 | if (locked) |
| 1935 | mmap_read_unlock(mm); |
| 1936 | return (ret == 1) ? page : NULL; |
| 1937 | } |
| 1938 | #endif /* CONFIG_ELF_CORE */ |
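
/*
 * Illustrative sketch only (helper names are approximate): the ELF coredump
 * code walks each VMA a page at a time and emits a hole wherever
 * get_dump_page() returns NULL:
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			dump_page_contents(cprm, page);
 *			put_page(page);
 *		} else {
 *			dump_skip(cprm, PAGE_SIZE);
 *		}
 *	}
 */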
| 1939 | |
| 1940 | #ifdef CONFIG_MIGRATION |
| 1941 | /* |
| 1942 | * Returns the number of collected pages. Return value is always >= 0. |
| 1943 | */ |
| 1944 | static unsigned long collect_longterm_unpinnable_pages( |
| 1945 | struct list_head *movable_page_list, |
| 1946 | unsigned long nr_pages, |
| 1947 | struct page **pages) |
| 1948 | { |
| 1949 | unsigned long i, collected = 0; |
| 1950 | struct folio *prev_folio = NULL; |
| 1951 | bool drain_allow = true; |
| 1952 | |
| 1953 | for (i = 0; i < nr_pages; i++) { |
| 1954 | struct folio *folio = page_folio(pages[i]); |
| 1955 | |
| 1956 | if (folio == prev_folio) |
| 1957 | continue; |
| 1958 | prev_folio = folio; |
| 1959 | |
| 1960 | if (folio_is_longterm_pinnable(folio)) |
| 1961 | continue; |
| 1962 | |
| 1963 | collected++; |
| 1964 | |
| 1965 | if (folio_is_device_coherent(folio)) |
| 1966 | continue; |
| 1967 | |
| 1968 | if (folio_test_hugetlb(folio)) { |
| 1969 | isolate_hugetlb(&folio->page, movable_page_list); |
| 1970 | continue; |
| 1971 | } |
| 1972 | |
| 1973 | if (!folio_test_lru(folio) && drain_allow) { |
| 1974 | lru_add_drain_all(); |
| 1975 | drain_allow = false; |
| 1976 | } |
| 1977 | |
| 1978 | if (!folio_isolate_lru(folio)) |
| 1979 | continue; |
| 1980 | |
| 1981 | list_add_tail(&folio->lru, movable_page_list); |
| 1982 | node_stat_mod_folio(folio, |
| 1983 | NR_ISOLATED_ANON + folio_is_file_lru(folio), |
| 1984 | folio_nr_pages(folio)); |
| 1985 | } |
| 1986 | |
| 1987 | return collected; |
| 1988 | } |
| 1989 | |
| 1990 | /* |
| 1991 | * Unpins all pages and migrates device coherent pages and movable_page_list. |
| 1992 | * Returns -EAGAIN if all pages were successfully migrated or -errno for failure |
| 1993 | * (or partial success). |
| 1994 | */ |
| 1995 | static int migrate_longterm_unpinnable_pages( |
| 1996 | struct list_head *movable_page_list, |
| 1997 | unsigned long nr_pages, |
| 1998 | struct page **pages) |
| 1999 | { |
| 2000 | int ret; |
| 2001 | unsigned long i; |
| 2002 | |
| 2003 | for (i = 0; i < nr_pages; i++) { |
| 2004 | struct folio *folio = page_folio(pages[i]); |
| 2005 | |
| 2006 | if (folio_is_device_coherent(folio)) { |
| 2007 | /* |
| 2008 | * Migration will fail if the page is pinned, so convert |
| 2009 | * the pin on the source page to a normal reference. |
| 2010 | */ |
| 2011 | pages[i] = NULL; |
| 2012 | folio_get(folio); |
| 2013 | gup_put_folio(folio, 1, FOLL_PIN); |
| 2014 | |
| 2015 | if (migrate_device_coherent_page(&folio->page)) { |
| 2016 | ret = -EBUSY; |
| 2017 | goto err; |
| 2018 | } |
| 2019 | |
| 2020 | continue; |
| 2021 | } |
| 2022 | |
| 2023 | /* |
| 2024 | * We can't migrate pages with unexpected references, so drop |
| 2025 | * the reference obtained by __get_user_pages_locked(). |
| 2026 | * Migrating pages have been added to movable_page_list after |
| 2027 | * calling folio_isolate_lru() which takes a reference so the |
| 2028 | * page won't be freed if it's migrating. |
| 2029 | */ |
| 2030 | unpin_user_page(pages[i]); |
| 2031 | pages[i] = NULL; |
| 2032 | } |
| 2033 | |
| 2034 | if (!list_empty(movable_page_list)) { |
| 2035 | struct migration_target_control mtc = { |
| 2036 | .nid = NUMA_NO_NODE, |
| 2037 | .gfp_mask = GFP_USER | __GFP_NOWARN, |
| 2038 | }; |
| 2039 | |
| 2040 | if (migrate_pages(movable_page_list, alloc_migration_target, |
| 2041 | NULL, (unsigned long)&mtc, MIGRATE_SYNC, |
| 2042 | MR_LONGTERM_PIN, NULL)) { |
| 2043 | ret = -ENOMEM; |
| 2044 | goto err; |
| 2045 | } |
| 2046 | } |
| 2047 | |
| 2048 | putback_movable_pages(movable_page_list); |
| 2049 | |
| 2050 | return -EAGAIN; |
| 2051 | |
| 2052 | err: |
| 2053 | for (i = 0; i < nr_pages; i++) |
| 2054 | if (pages[i]) |
| 2055 | unpin_user_page(pages[i]); |
| 2056 | putback_movable_pages(movable_page_list); |
| 2057 | |
| 2058 | return ret; |
| 2059 | } |
| 2060 | |
| 2061 | /* |
| 2062 | * Check whether all pages are *allowed* to be pinned. Rather confusingly, all |
| 2063 | * pages in the range are required to be pinned via FOLL_PIN, before calling |
| 2064 | * this routine. |
| 2065 | * |
| 2066 | * If any pages in the range are not allowed to be pinned, then this routine |
| 2067 | * will migrate those pages away, unpin all the pages in the range and return |
| 2068 | * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then |
| 2069 | * call this routine again. |
| 2070 | * |
| 2071 | * If an error other than -EAGAIN occurs, this indicates a migration failure. |
| 2072 | * The caller should give up, and propagate the error back up the call stack. |
| 2073 | * |
| 2074 | * If everything is OK and all pages in the range are allowed to be pinned, then |
| 2075 | * this routine leaves all pages pinned and returns zero for success. |
| 2076 | */ |
| 2077 | static long check_and_migrate_movable_pages(unsigned long nr_pages, |
| 2078 | struct page **pages) |
| 2079 | { |
| 2080 | unsigned long collected; |
| 2081 | LIST_HEAD(movable_page_list); |
| 2082 | |
| 2083 | collected = collect_longterm_unpinnable_pages(&movable_page_list, |
| 2084 | nr_pages, pages); |
| 2085 | if (!collected) |
| 2086 | return 0; |
| 2087 | |
| 2088 | return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages, |
| 2089 | pages); |
| 2090 | } |
| 2091 | #else |
| 2092 | static long check_and_migrate_movable_pages(unsigned long nr_pages, |
| 2093 | struct page **pages) |
| 2094 | { |
| 2095 | return 0; |
| 2096 | } |
| 2097 | #endif /* CONFIG_MIGRATION */ |
| 2098 | |
| 2099 | /* |
| 2100 | * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which |
| 2101 | * allows us to process the FOLL_LONGTERM flag. |
| 2102 | */ |
| 2103 | static long __gup_longterm_locked(struct mm_struct *mm, |
| 2104 | unsigned long start, |
| 2105 | unsigned long nr_pages, |
| 2106 | struct page **pages, |
| 2107 | struct vm_area_struct **vmas, |
| 2108 | unsigned int gup_flags) |
| 2109 | { |
| 2110 | unsigned int flags; |
| 2111 | long rc, nr_pinned_pages; |
| 2112 | |
| 2113 | if (!(gup_flags & FOLL_LONGTERM)) |
| 2114 | return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, |
| 2115 | NULL, gup_flags); |
| 2116 | |
| 2117 | /* |
| 2118 | * If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM |
| 2119 | * implies FOLL_PIN (although the reverse is not true). Therefore it is |
| 2120 | * correct to unconditionally call check_and_migrate_movable_pages() |
| 2121 | * which assumes pages have been pinned via FOLL_PIN. |
| 2122 | * |
| 2123 | * Enforce the above reasoning by asserting that FOLL_PIN is set. |
| 2124 | */ |
| 2125 | if (WARN_ON(!(gup_flags & FOLL_PIN))) |
| 2126 | return -EINVAL; |
| 2127 | flags = memalloc_pin_save(); |
| 2128 | do { |
| 2129 | nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, |
| 2130 | pages, vmas, NULL, |
| 2131 | gup_flags); |
| 2132 | if (nr_pinned_pages <= 0) { |
| 2133 | rc = nr_pinned_pages; |
| 2134 | break; |
| 2135 | } |
| 2136 | rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); |
| 2137 | } while (rc == -EAGAIN); |
| 2138 | memalloc_pin_restore(flags); |
| 2139 | |
| 2140 | return rc ? rc : nr_pinned_pages; |
| 2141 | } |
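
/*
 * Illustrative sketch only: long-term pinning users (RDMA, VFIO and
 * similar) reach this path via pin_user_pages*() with FOLL_LONGTERM rather
 * than calling it directly, roughly (error handling omitted):
 *
 *	nr = pin_user_pages(start, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
 *			    pages, NULL);
 *	... set up DMA to the pinned pages ...
 *	unpin_user_pages(pages, nr);
 */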
| 2142 | |
| 2143 | static bool is_valid_gup_flags(unsigned int gup_flags) |
| 2144 | { |
| 2145 | /* |
| 2146 | * FOLL_PIN must only be set internally by the pin_user_pages*() APIs, |
| 2147 | * never directly by the caller, so enforce that with an assertion: |
| 2148 | */ |
| 2149 | if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) |
| 2150 | return false; |
| 2151 | /* |
| 2152 | * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying |
| 2153 |  * that is, FOLL_LONGTERM is a specific, more restrictive case of
| 2154 | * FOLL_PIN. |
| 2155 | */ |
| 2156 | if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) |
| 2157 | return false; |
| 2158 | |
| 2159 | return true; |
| 2160 | } |
| 2161 | |
| 2162 | #ifdef CONFIG_MMU |
| 2163 | static long __get_user_pages_remote(struct mm_struct *mm, |
| 2164 | unsigned long start, unsigned long nr_pages, |
| 2165 | unsigned int gup_flags, struct page **pages, |
| 2166 | struct vm_area_struct **vmas, int *locked) |
| 2167 | { |
| 2168 | /* |
| 2169 | * Parts of FOLL_LONGTERM behavior are incompatible with |
| 2170 | * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on |
| 2171 | * vmas. However, this only comes up if locked is set, and there are |
| 2172 | * callers that do request FOLL_LONGTERM, but do not set locked. So, |
| 2173 | * allow what we can. |
| 2174 | */ |
| 2175 | if (gup_flags & FOLL_LONGTERM) { |
| 2176 | if (WARN_ON_ONCE(locked)) |
| 2177 | return -EINVAL; |
| 2178 | /* |
| 2179 | * This will check the vmas (even if our vmas arg is NULL) |
| 2180 | * and return -ENOTSUPP if DAX isn't allowed in this case: |
| 2181 | */ |
| 2182 | return __gup_longterm_locked(mm, start, nr_pages, pages, |
| 2183 | vmas, gup_flags | FOLL_TOUCH | |
| 2184 | FOLL_REMOTE); |
| 2185 | } |
| 2186 | |
| 2187 | return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, |
| 2188 | locked, |
| 2189 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); |
| 2190 | } |
| 2191 | |
| 2192 | /** |
| 2193 | * get_user_pages_remote() - pin user pages in memory |
| 2194 | * @mm: mm_struct of target mm |
| 2195 | * @start: starting user address |
| 2196 | * @nr_pages: number of pages from start to pin |
| 2197 | * @gup_flags: flags modifying lookup behaviour |
| 2198 | * @pages: array that receives pointers to the pages pinned. |
| 2199 | * Should be at least nr_pages long. Or NULL, if caller |
| 2200 | * only intends to ensure the pages are faulted in. |
| 2201 | * @vmas: array of pointers to vmas corresponding to each page. |
| 2202 | * Or NULL if the caller does not require them. |
| 2203 | * @locked: pointer to lock flag indicating whether lock is held and |
| 2204 | * subsequently whether VM_FAULT_RETRY functionality can be |
| 2205 | * utilised. Lock must initially be held. |
| 2206 | * |
| 2207 | * Returns either number of pages pinned (which may be less than the |
| 2208 | * number requested), or an error. Details about the return value: |
| 2209 | * |
| 2210 | * -- If nr_pages is 0, returns 0. |
| 2211 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. |
| 2212 | * -- If nr_pages is >0, and some pages were pinned, returns the number of |
| 2213 | * pages pinned. Again, this may be less than nr_pages. |
| 2214 | * |
| 2215 | * The caller is responsible for releasing returned @pages, via put_page(). |
| 2216 | * |
| 2217 | * @vmas are valid only as long as mmap_lock is held. |
| 2218 | * |
| 2219 | * Must be called with mmap_lock held for read or write. |
| 2220 | * |
| 2221 | * get_user_pages_remote walks a process's page tables and takes a reference |
| 2222 | * to each struct page that each user address corresponds to at a given |
| 2223 | * instant. That is, it takes the page that would be accessed if a user |
| 2224 | * thread accesses the given user virtual address at that instant. |
| 2225 | * |
| 2226 | * This does not guarantee that the page exists in the user mappings when |
| 2227 | * get_user_pages_remote returns, and there may even be a completely different |
| 2228 | * page there in some cases (eg. if mmapped pagecache has been invalidated |
| 2229 |  * and subsequently re-faulted). However, it does guarantee that the page
| 2230 | * won't be freed completely. And mostly callers simply care that the page |
| 2231 | * contains data that was valid *at some point in time*. Typically, an IO |
| 2232 | * or similar operation cannot guarantee anything stronger anyway because |
| 2233 | * locks can't be held over the syscall boundary. |
| 2234 | * |
| 2235 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page |
| 2236 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must |
| 2237 | * be called after the page is finished with, and before put_page is called. |
| 2238 | * |
| 2239 | * get_user_pages_remote is typically used for fewer-copy IO operations, |
| 2240 | * to get a handle on the memory by some means other than accesses |
| 2241 | * via the user virtual addresses. The pages may be submitted for |
| 2242 | * DMA to devices or accessed via their kernel linear mapping (via the |
| 2243 | * kmap APIs). Care should be taken to use the correct cache flushing APIs. |
| 2244 | * |
| 2245 | * See also get_user_pages_fast, for performance critical applications. |
| 2246 | * |
| 2247 | * get_user_pages_remote should be phased out in favor of |
| 2248 | * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing |
| 2249 | * should use get_user_pages_remote because it cannot pass |
| 2250 | * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. |
| 2251 | */ |
| 2252 | long get_user_pages_remote(struct mm_struct *mm, |
| 2253 | unsigned long start, unsigned long nr_pages, |
| 2254 | unsigned int gup_flags, struct page **pages, |
| 2255 | struct vm_area_struct **vmas, int *locked) |
| 2256 | { |
| 2257 | if (!is_valid_gup_flags(gup_flags)) |
| 2258 | return -EINVAL; |
| 2259 | |
| 2260 | return __get_user_pages_remote(mm, start, nr_pages, gup_flags, |
| 2261 | pages, vmas, locked); |
| 2262 | } |
| 2263 | EXPORT_SYMBOL(get_user_pages_remote); |
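
/*
 * Illustrative sketch only: a typical remote user (ptrace-style access to
 * another process's memory) looks roughly like this, with locking and error
 * handling simplified:
 *
 *	mmap_read_lock(mm);
 *	ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL,
 *				    NULL);
 *	mmap_read_unlock(mm);
 *	if (ret == 1) {
 *		... kmap the page and copy to or from it ...
 *		put_page(page);
 *	}
 */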
| 2264 | |
| 2265 | #else /* CONFIG_MMU */ |
| 2266 | long get_user_pages_remote(struct mm_struct *mm, |
| 2267 | unsigned long start, unsigned long nr_pages, |
| 2268 | unsigned int gup_flags, struct page **pages, |
| 2269 | struct vm_area_struct **vmas, int *locked) |
| 2270 | { |
| 2271 | return 0; |
| 2272 | } |
| 2273 | |
| 2274 | static long __get_user_pages_remote(struct mm_struct *mm, |
| 2275 | unsigned long start, unsigned long nr_pages, |
| 2276 | unsigned int gup_flags, struct page **pages, |
| 2277 | struct vm_area_struct **vmas, int *locked) |
| 2278 | { |
| 2279 | return 0; |
| 2280 | } |
| 2281 | #endif /* !CONFIG_MMU */ |
| 2282 | |
| 2283 | /** |
| 2284 | * get_user_pages() - pin user pages in memory |
| 2285 | * @start: starting user address |
| 2286 | * @nr_pages: number of pages from start to pin |
| 2287 | * @gup_flags: flags modifying lookup behaviour |
| 2288 | * @pages: array that receives pointers to the pages pinned. |
| 2289 | * Should be at least nr_pages long. Or NULL, if caller |
| 2290 | * only intends to ensure the pages are faulted in. |
| 2291 | * @vmas: array of pointers to vmas corresponding to each page. |
| 2292 | * Or NULL if the caller does not require them. |
| 2293 | * |
| 2294 | * This is the same as get_user_pages_remote(), just with a less-flexible |
| 2295 | * calling convention where we assume that the mm being operated on belongs to |
| 2296 | * the current task, and doesn't allow passing of a locked parameter. We also |
| 2297 | * obviously don't pass FOLL_REMOTE in here. |
| 2298 | */ |
| 2299 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
| 2300 | unsigned int gup_flags, struct page **pages, |
| 2301 | struct vm_area_struct **vmas) |
| 2302 | { |
| 2303 | if (!is_valid_gup_flags(gup_flags)) |
| 2304 | return -EINVAL; |
| 2305 | |
| 2306 | return __gup_longterm_locked(current->mm, start, nr_pages, |
| 2307 | pages, vmas, gup_flags | FOLL_TOUCH); |
| 2308 | } |
| 2309 | EXPORT_SYMBOL(get_user_pages); |
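
/*
 * Illustrative sketch only: a short-term user of this interface that writes
 * into the pages and releases them afterwards (simplified):
 *
 *	nr = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
 *	for (i = 0; i < nr; i++) {
 *		... access the contents via kmap_local_page(pages[i]) ...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */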
| 2310 | |
| 2311 | /* |
| 2312 | * get_user_pages_unlocked() is suitable to replace the form: |
| 2313 | * |
| 2314 | * mmap_read_lock(mm); |
| 2315 |  *      get_user_pages(start, nr_pages, gup_flags, pages, NULL);
| 2316 | * mmap_read_unlock(mm); |
| 2317 | * |
| 2318 | * with: |
| 2319 | * |
| 2320 |  *      get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
| 2321 | * |
| 2322 | * It is functionally equivalent to get_user_pages_fast so |
| 2323 | * get_user_pages_fast should be used instead if specific gup_flags |
| 2324 | * (e.g. FOLL_FORCE) are not required. |
| 2325 | */ |
| 2326 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 2327 | struct page **pages, unsigned int gup_flags) |
| 2328 | { |
| 2329 | struct mm_struct *mm = current->mm; |
| 2330 | int locked = 1; |
| 2331 | long ret; |
| 2332 | |
| 2333 | /* |
| 2334 | * FIXME: Current FOLL_LONGTERM behavior is incompatible with |
| 2335 | * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on |
| 2336 | * vmas. As there are no users of this flag in this call we simply |
| 2337 | * disallow this option for now. |
| 2338 | */ |
| 2339 | if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) |
| 2340 | return -EINVAL; |
| 2341 | |
| 2342 | mmap_read_lock(mm); |
| 2343 | ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, |
| 2344 | &locked, gup_flags | FOLL_TOUCH); |
| 2345 | if (locked) |
| 2346 | mmap_read_unlock(mm); |
| 2347 | return ret; |
| 2348 | } |
| 2349 | EXPORT_SYMBOL(get_user_pages_unlocked); |
| 2350 | |
| 2351 | /* |
| 2352 | * Fast GUP |
| 2353 | * |
| 2354 | * get_user_pages_fast attempts to pin user pages by walking the page |
| 2355 | * tables directly and avoids taking locks. Thus the walker needs to be |
| 2356 | * protected from page table pages being freed from under it, and should |
| 2357 | * block any THP splits. |
| 2358 | * |
| 2359 | * One way to achieve this is to have the walker disable interrupts, and |
| 2360 | * rely on IPIs from the TLB flushing code blocking before the page table |
| 2361 | * pages are freed. This is unsuitable for architectures that do not need |
| 2362 | * to broadcast an IPI when invalidating TLBs. |
| 2363 | * |
| 2364 | * Another way to achieve this is to batch up page table containing pages |
| 2365 | * belonging to more than one mm_user, then rcu_sched a callback to free those |
| 2366 | * pages. Disabling interrupts will allow the fast_gup walker to both block |
| 2367 | * the rcu_sched callback, and an IPI that we broadcast for splitting THPs |
| 2368 | * (which is a relatively rare event). The code below adopts this strategy. |
| 2369 | * |
| 2370 | * Before activating this code, please be aware that the following assumptions |
| 2371 | * are currently made: |
| 2372 | * |
| 2373 | * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to |
| 2374 | * free pages containing page tables or TLB flushing requires IPI broadcast. |
| 2375 | * |
| 2376 | * *) ptes can be read atomically by the architecture. |
| 2377 | * |
| 2378 | * *) access_ok is sufficient to validate userspace address ranges. |
| 2379 | * |
| 2380 | * The last two assumptions can be relaxed by the addition of helper functions. |
| 2381 | * |
| 2382 | * This code is based heavily on the PowerPC implementation by Nick Piggin. |
| 2383 | */ |
| 2384 | #ifdef CONFIG_HAVE_FAST_GUP |
| 2385 | |
| 2386 | static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, |
| 2387 | unsigned int flags, |
| 2388 | struct page **pages) |
| 2389 | { |
| 2390 | while ((*nr) - nr_start) { |
| 2391 | struct page *page = pages[--(*nr)]; |
| 2392 | |
| 2393 | ClearPageReferenced(page); |
| 2394 | if (flags & FOLL_PIN) |
| 2395 | unpin_user_page(page); |
| 2396 | else |
| 2397 | put_page(page); |
| 2398 | } |
| 2399 | } |
| 2400 | |
| 2401 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
| 2402 | /* |
| 2403 | * Fast-gup relies on pte change detection to avoid concurrent pgtable |
| 2404 | * operations. |
| 2405 | * |
| 2406 | * To pin the page, fast-gup needs to do below in order: |
| 2407 | * (1) pin the page (by prefetching pte), then (2) check pte not changed. |
| 2408 | * |
| 2409 | * For the rest of pgtable operations where pgtable updates can be racy |
| 2410 | * with fast-gup, we need to do (1) clear pte, then (2) check whether page |
| 2411 | * is pinned. |
| 2412 | * |
| 2413 | * Above will work for all pte-level operations, including THP split. |
| 2414 | * |
| 2415 | * For THP collapse, it's a bit more complicated because fast-gup may be |
| 2416 | * walking a pgtable page that is being freed (pte is still valid but pmd |
| 2417 | * can be cleared already). To avoid race in such condition, we need to |
| 2418 | * also check pmd here to make sure pmd doesn't change (corresponds to |
| 2419 | * pmdp_collapse_flush() in the THP collapse code path). |
| 2420 | */ |
| 2421 | static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, |
| 2422 | unsigned long end, unsigned int flags, |
| 2423 | struct page **pages, int *nr) |
| 2424 | { |
| 2425 | struct dev_pagemap *pgmap = NULL; |
| 2426 | int nr_start = *nr, ret = 0; |
| 2427 | pte_t *ptep, *ptem; |
| 2428 | |
| 2429 | ptem = ptep = pte_offset_map(&pmd, addr); |
| 2430 | do { |
| 2431 | pte_t pte = ptep_get_lockless(ptep); |
| 2432 | struct page *page; |
| 2433 | struct folio *folio; |
| 2434 | |
| 2435 | if (pte_protnone(pte) && !gup_can_follow_protnone(flags)) |
| 2436 | goto pte_unmap; |
| 2437 | |
| 2438 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) |
| 2439 | goto pte_unmap; |
| 2440 | |
| 2441 | if (pte_devmap(pte)) { |
| 2442 | if (unlikely(flags & FOLL_LONGTERM)) |
| 2443 | goto pte_unmap; |
| 2444 | |
| 2445 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); |
| 2446 | if (unlikely(!pgmap)) { |
| 2447 | undo_dev_pagemap(nr, nr_start, flags, pages); |
| 2448 | goto pte_unmap; |
| 2449 | } |
| 2450 | } else if (pte_special(pte)) |
| 2451 | goto pte_unmap; |
| 2452 | |
| 2453 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 2454 | page = pte_page(pte); |
| 2455 | |
| 2456 | folio = try_grab_folio(page, 1, flags); |
| 2457 | if (!folio) |
| 2458 | goto pte_unmap; |
| 2459 | |
| 2460 | if (unlikely(page_is_secretmem(page))) { |
| 2461 | gup_put_folio(folio, 1, flags); |
| 2462 | goto pte_unmap; |
| 2463 | } |
| 2464 | |
| 2465 | if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || |
| 2466 | unlikely(pte_val(pte) != pte_val(*ptep))) { |
| 2467 | gup_put_folio(folio, 1, flags); |
| 2468 | goto pte_unmap; |
| 2469 | } |
| 2470 | |
| 2471 | if (!pte_write(pte) && gup_must_unshare(flags, page)) { |
| 2472 | gup_put_folio(folio, 1, flags); |
| 2473 | goto pte_unmap; |
| 2474 | } |
| 2475 | |
| 2476 | /* |
| 2477 | * We need to make the page accessible if and only if we are |
| 2478 | * going to access its content (the FOLL_PIN case). Please |
| 2479 | * see Documentation/core-api/pin_user_pages.rst for |
| 2480 | * details. |
| 2481 | */ |
| 2482 | if (flags & FOLL_PIN) { |
| 2483 | ret = arch_make_page_accessible(page); |
| 2484 | if (ret) { |
| 2485 | gup_put_folio(folio, 1, flags); |
| 2486 | goto pte_unmap; |
| 2487 | } |
| 2488 | } |
| 2489 | folio_set_referenced(folio); |
| 2490 | pages[*nr] = page; |
| 2491 | (*nr)++; |
| 2492 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
| 2493 | |
| 2494 | ret = 1; |
| 2495 | |
| 2496 | pte_unmap: |
| 2497 | if (pgmap) |
| 2498 | put_dev_pagemap(pgmap); |
| 2499 | pte_unmap(ptem); |
| 2500 | return ret; |
| 2501 | } |
| 2502 | #else |
| 2503 | |
| 2504 | /* |
| 2505 | * If we can't determine whether or not a pte is special, then fail immediately |
| 2506 | * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not |
| 2507 | * to be special. |
| 2508 | * |
| 2509 | * For a futex to be placed on a THP tail page, get_futex_key requires a |
| 2510 | * get_user_pages_fast_only implementation that can pin pages. Thus it's still |
| 2511 | * useful to have gup_huge_pmd even if we can't operate on ptes. |
| 2512 | */ |
| 2513 | static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, |
| 2514 | unsigned long end, unsigned int flags, |
| 2515 | struct page **pages, int *nr) |
| 2516 | { |
| 2517 | return 0; |
| 2518 | } |
| 2519 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ |
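
/*
 * Illustrative pseudo-code only for the pairing described in the comment
 * above the CONFIG_ARCH_HAS_PTE_SPECIAL version of gup_pte_range():
 * fast-gup pins first and then rechecks the pte, while the unmap side
 * clears the pte first and then checks for pins (handle_the_race() is a
 * stand-in, not a real function):
 *
 *	fast-gup side:
 *		pte = ptep_get_lockless(ptep);
 *		folio = try_grab_folio(page, 1, flags);
 *		if (pte_val(pte) != pte_val(*ptep))
 *			gup_put_folio(folio, 1, flags);	// raced, back out
 *
 *	unmap / write-protect side:
 *		pte = ptep_clear_flush(vma, addr, ptep);
 *		if (folio_maybe_dma_pinned(folio))
 *			handle_the_race();
 */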
| 2520 | |
| 2521 | #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
| 2522 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, |
| 2523 | unsigned long end, unsigned int flags, |
| 2524 | struct page **pages, int *nr) |
| 2525 | { |
| 2526 | int nr_start = *nr; |
| 2527 | struct dev_pagemap *pgmap = NULL; |
| 2528 | |
| 2529 | do { |
| 2530 | struct page *page = pfn_to_page(pfn); |
| 2531 | |
| 2532 | pgmap = get_dev_pagemap(pfn, pgmap); |
| 2533 | if (unlikely(!pgmap)) { |
| 2534 | undo_dev_pagemap(nr, nr_start, flags, pages); |
| 2535 | break; |
| 2536 | } |
| 2537 | SetPageReferenced(page); |
| 2538 | pages[*nr] = page; |
| 2539 | if (unlikely(!try_grab_page(page, flags))) { |
| 2540 | undo_dev_pagemap(nr, nr_start, flags, pages); |
| 2541 | break; |
| 2542 | } |
| 2543 | (*nr)++; |
| 2544 | pfn++; |
| 2545 | } while (addr += PAGE_SIZE, addr != end); |
| 2546 | |
| 2547 | put_dev_pagemap(pgmap); |
| 2548 | return addr == end; |
| 2549 | } |
| 2550 | |
| 2551 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
| 2552 | unsigned long end, unsigned int flags, |
| 2553 | struct page **pages, int *nr) |
| 2554 | { |
| 2555 | unsigned long fault_pfn; |
| 2556 | int nr_start = *nr; |
| 2557 | |
| 2558 | fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
| 2559 | if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) |
| 2560 | return 0; |
| 2561 | |
| 2562 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
| 2563 | undo_dev_pagemap(nr, nr_start, flags, pages); |
| 2564 | return 0; |
| 2565 | } |
| 2566 | return 1; |
| 2567 | } |
| 2568 | |
| 2569 | static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, |
| 2570 | unsigned long end, unsigned int flags, |
| 2571 | struct page **pages, int *nr) |
| 2572 | { |
| 2573 | unsigned long fault_pfn; |
| 2574 | int nr_start = *nr; |
| 2575 | |
| 2576 | fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
| 2577 | if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) |
| 2578 | return 0; |
| 2579 | |
| 2580 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
| 2581 | undo_dev_pagemap(nr, nr_start, flags, pages); |
| 2582 | return 0; |
| 2583 | } |
| 2584 | return 1; |
| 2585 | } |
| 2586 | #else |
| 2587 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
| 2588 | unsigned long end, unsigned int flags, |
| 2589 | struct page **pages, int *nr) |
| 2590 | { |
| 2591 | BUILD_BUG(); |
| 2592 | return 0; |
| 2593 | } |
| 2594 | |
| 2595 | static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, |
| 2596 | unsigned long end, unsigned int flags, |
| 2597 | struct page **pages, int *nr) |
| 2598 | { |
| 2599 | BUILD_BUG(); |
| 2600 | return 0; |
| 2601 | } |
| 2602 | #endif |
| 2603 | |
| 2604 | static int record_subpages(struct page *page, unsigned long addr, |
| 2605 | unsigned long end, struct page **pages) |
| 2606 | { |
| 2607 | int nr; |
| 2608 | |
| 2609 | for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) |
| 2610 | pages[nr] = nth_page(page, nr); |
| 2611 | |
| 2612 | return nr; |
| 2613 | } |
| 2614 | |
| 2615 | #ifdef CONFIG_ARCH_HAS_HUGEPD |
| 2616 | static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, |
| 2617 | unsigned long sz) |
| 2618 | { |
| 2619 | unsigned long __boundary = (addr + sz) & ~(sz-1); |
| 2620 | return (__boundary - 1 < end - 1) ? __boundary : end; |
| 2621 | } |
| 2622 | |
| 2623 | static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, |
| 2624 | unsigned long end, unsigned int flags, |
| 2625 | struct page **pages, int *nr) |
| 2626 | { |
| 2627 | unsigned long pte_end; |
| 2628 | struct page *page; |
| 2629 | struct folio *folio; |
| 2630 | pte_t pte; |
| 2631 | int refs; |
| 2632 | |
| 2633 | pte_end = (addr + sz) & ~(sz-1); |
| 2634 | if (pte_end < end) |
| 2635 | end = pte_end; |
| 2636 | |
| 2637 | pte = huge_ptep_get(ptep); |
| 2638 | |
| 2639 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) |
| 2640 | return 0; |
| 2641 | |
| 2642 | /* hugepages are never "special" */ |
| 2643 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 2644 | |
| 2645 | page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); |
| 2646 | refs = record_subpages(page, addr, end, pages + *nr); |
| 2647 | |
| 2648 | folio = try_grab_folio(page, refs, flags); |
| 2649 | if (!folio) |
| 2650 | return 0; |
| 2651 | |
| 2652 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { |
| 2653 | gup_put_folio(folio, refs, flags); |
| 2654 | return 0; |
| 2655 | } |
| 2656 | |
| 2657 | if (!pte_write(pte) && gup_must_unshare(flags, &folio->page)) { |
| 2658 | gup_put_folio(folio, refs, flags); |
| 2659 | return 0; |
| 2660 | } |
| 2661 | |
| 2662 | *nr += refs; |
| 2663 | folio_set_referenced(folio); |
| 2664 | return 1; |
| 2665 | } |
| 2666 | |
| 2667 | static int gup_huge_pd(hugepd_t hugepd, unsigned long addr, |
| 2668 | unsigned int pdshift, unsigned long end, unsigned int flags, |
| 2669 | struct page **pages, int *nr) |
| 2670 | { |
| 2671 | pte_t *ptep; |
| 2672 | unsigned long sz = 1UL << hugepd_shift(hugepd); |
| 2673 | unsigned long next; |
| 2674 | |
| 2675 | ptep = hugepte_offset(hugepd, addr, pdshift); |
| 2676 | do { |
| 2677 | next = hugepte_addr_end(addr, end, sz); |
| 2678 | if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) |
| 2679 | return 0; |
| 2680 | } while (ptep++, addr = next, addr != end); |
| 2681 | |
| 2682 | return 1; |
| 2683 | } |
| 2684 | #else |
| 2685 | static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, |
| 2686 | unsigned int pdshift, unsigned long end, unsigned int flags, |
| 2687 | struct page **pages, int *nr) |
| 2688 | { |
| 2689 | return 0; |
| 2690 | } |
| 2691 | #endif /* CONFIG_ARCH_HAS_HUGEPD */ |
| 2692 | |
| 2693 | static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
| 2694 | unsigned long end, unsigned int flags, |
| 2695 | struct page **pages, int *nr) |
| 2696 | { |
| 2697 | struct page *page; |
| 2698 | struct folio *folio; |
| 2699 | int refs; |
| 2700 | |
| 2701 | if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) |
| 2702 | return 0; |
| 2703 | |
| 2704 | if (pmd_devmap(orig)) { |
| 2705 | if (unlikely(flags & FOLL_LONGTERM)) |
| 2706 | return 0; |
| 2707 | return __gup_device_huge_pmd(orig, pmdp, addr, end, flags, |
| 2708 | pages, nr); |
| 2709 | } |
| 2710 | |
| 2711 | page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); |
| 2712 | refs = record_subpages(page, addr, end, pages + *nr); |
| 2713 | |
| 2714 | folio = try_grab_folio(page, refs, flags); |
| 2715 | if (!folio) |
| 2716 | return 0; |
| 2717 | |
| 2718 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
| 2719 | gup_put_folio(folio, refs, flags); |
| 2720 | return 0; |
| 2721 | } |
| 2722 | |
| 2723 | if (!pmd_write(orig) && gup_must_unshare(flags, &folio->page)) { |
| 2724 | gup_put_folio(folio, refs, flags); |
| 2725 | return 0; |
| 2726 | } |
| 2727 | |
| 2728 | *nr += refs; |
| 2729 | folio_set_referenced(folio); |
| 2730 | return 1; |
| 2731 | } |
| 2732 | |
| 2733 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, |
| 2734 | unsigned long end, unsigned int flags, |
| 2735 | struct page **pages, int *nr) |
| 2736 | { |
| 2737 | struct page *page; |
| 2738 | struct folio *folio; |
| 2739 | int refs; |
| 2740 | |
| 2741 | if (!pud_access_permitted(orig, flags & FOLL_WRITE)) |
| 2742 | return 0; |
| 2743 | |
| 2744 | if (pud_devmap(orig)) { |
| 2745 | if (unlikely(flags & FOLL_LONGTERM)) |
| 2746 | return 0; |
| 2747 | return __gup_device_huge_pud(orig, pudp, addr, end, flags, |
| 2748 | pages, nr); |
| 2749 | } |
| 2750 | |
| 2751 | page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); |
| 2752 | refs = record_subpages(page, addr, end, pages + *nr); |
| 2753 | |
| 2754 | folio = try_grab_folio(page, refs, flags); |
| 2755 | if (!folio) |
| 2756 | return 0; |
| 2757 | |
| 2758 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
| 2759 | gup_put_folio(folio, refs, flags); |
| 2760 | return 0; |
| 2761 | } |
| 2762 | |
| 2763 | if (!pud_write(orig) && gup_must_unshare(flags, &folio->page)) { |
| 2764 | gup_put_folio(folio, refs, flags); |
| 2765 | return 0; |
| 2766 | } |
| 2767 | |
| 2768 | *nr += refs; |
| 2769 | folio_set_referenced(folio); |
| 2770 | return 1; |
| 2771 | } |
| 2772 | |
| 2773 | static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, |
| 2774 | unsigned long end, unsigned int flags, |
| 2775 | struct page **pages, int *nr) |
| 2776 | { |
| 2777 | int refs; |
| 2778 | struct page *page; |
| 2779 | struct folio *folio; |
| 2780 | |
| 2781 | if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) |
| 2782 | return 0; |
| 2783 | |
| 2784 | BUILD_BUG_ON(pgd_devmap(orig)); |
| 2785 | |
| 2786 | page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); |
| 2787 | refs = record_subpages(page, addr, end, pages + *nr); |
| 2788 | |
| 2789 | folio = try_grab_folio(page, refs, flags); |
| 2790 | if (!folio) |
| 2791 | return 0; |
| 2792 | |
| 2793 | if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { |
| 2794 | gup_put_folio(folio, refs, flags); |
| 2795 | return 0; |
| 2796 | } |
| 2797 | |
| 2798 | *nr += refs; |
| 2799 | folio_set_referenced(folio); |
| 2800 | return 1; |
| 2801 | } |
| 2802 | |
| 2803 | static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, |
| 2804 | unsigned int flags, struct page **pages, int *nr) |
| 2805 | { |
| 2806 | unsigned long next; |
| 2807 | pmd_t *pmdp; |
| 2808 | |
| 2809 | pmdp = pmd_offset_lockless(pudp, pud, addr); |
| 2810 | do { |
| 2811 | pmd_t pmd = READ_ONCE(*pmdp); |
| 2812 | |
| 2813 | next = pmd_addr_end(addr, end); |
| 2814 | if (!pmd_present(pmd)) |
| 2815 | return 0; |
| 2816 | |
| 2817 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || |
| 2818 | pmd_devmap(pmd))) { |
| 2819 | if (pmd_protnone(pmd) && |
| 2820 | !gup_can_follow_protnone(flags)) |
| 2821 | return 0; |
| 2822 | |
| 2823 | if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, |
| 2824 | pages, nr)) |
| 2825 | return 0; |
| 2826 | |
| 2827 | } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { |
| 2828 | /* |
| 2829 |  * Architectures can use different formats for the
| 2830 |  * hugetlbfs pmd and the THP pmd.
| 2831 | */ |
| 2832 | if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, |
| 2833 | PMD_SHIFT, next, flags, pages, nr)) |
| 2834 | return 0; |
| 2835 | } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) |
| 2836 | return 0; |
| 2837 | } while (pmdp++, addr = next, addr != end); |
| 2838 | |
| 2839 | return 1; |
| 2840 | } |
| 2841 | |
| 2842 | static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, |
| 2843 | unsigned int flags, struct page **pages, int *nr) |
| 2844 | { |
| 2845 | unsigned long next; |
| 2846 | pud_t *pudp; |
| 2847 | |
| 2848 | pudp = pud_offset_lockless(p4dp, p4d, addr); |
| 2849 | do { |
| 2850 | pud_t pud = READ_ONCE(*pudp); |
| 2851 | |
| 2852 | next = pud_addr_end(addr, end); |
| 2853 | if (unlikely(!pud_present(pud))) |
| 2854 | return 0; |
| 2855 | if (unlikely(pud_huge(pud))) { |
| 2856 | if (!gup_huge_pud(pud, pudp, addr, next, flags, |
| 2857 | pages, nr)) |
| 2858 | return 0; |
| 2859 | } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { |
| 2860 | if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, |
| 2861 | PUD_SHIFT, next, flags, pages, nr)) |
| 2862 | return 0; |
| 2863 | } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) |
| 2864 | return 0; |
| 2865 | } while (pudp++, addr = next, addr != end); |
| 2866 | |
| 2867 | return 1; |
| 2868 | } |
| 2869 | |
| 2870 | static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, |
| 2871 | unsigned int flags, struct page **pages, int *nr) |
| 2872 | { |
| 2873 | unsigned long next; |
| 2874 | p4d_t *p4dp; |
| 2875 | |
| 2876 | p4dp = p4d_offset_lockless(pgdp, pgd, addr); |
| 2877 | do { |
| 2878 | p4d_t p4d = READ_ONCE(*p4dp); |
| 2879 | |
| 2880 | next = p4d_addr_end(addr, end); |
| 2881 | if (p4d_none(p4d)) |
| 2882 | return 0; |
| 2883 | BUILD_BUG_ON(p4d_huge(p4d)); |
| 2884 | if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { |
| 2885 | if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, |
| 2886 | P4D_SHIFT, next, flags, pages, nr)) |
| 2887 | return 0; |
| 2888 | } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) |
| 2889 | return 0; |
| 2890 | } while (p4dp++, addr = next, addr != end); |
| 2891 | |
| 2892 | return 1; |
| 2893 | } |
| 2894 | |
| 2895 | static void gup_pgd_range(unsigned long addr, unsigned long end, |
| 2896 | unsigned int flags, struct page **pages, int *nr) |
| 2897 | { |
| 2898 | unsigned long next; |
| 2899 | pgd_t *pgdp; |
| 2900 | |
| 2901 | pgdp = pgd_offset(current->mm, addr); |
| 2902 | do { |
| 2903 | pgd_t pgd = READ_ONCE(*pgdp); |
| 2904 | |
| 2905 | next = pgd_addr_end(addr, end); |
| 2906 | if (pgd_none(pgd)) |
| 2907 | return; |
| 2908 | if (unlikely(pgd_huge(pgd))) { |
| 2909 | if (!gup_huge_pgd(pgd, pgdp, addr, next, flags, |
| 2910 | pages, nr)) |
| 2911 | return; |
| 2912 | } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { |
| 2913 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, |
| 2914 | PGDIR_SHIFT, next, flags, pages, nr)) |
| 2915 | return; |
| 2916 | } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) |
| 2917 | return; |
| 2918 | } while (pgdp++, addr = next, addr != end); |
| 2919 | } |
| 2920 | #else |
| 2921 | static inline void gup_pgd_range(unsigned long addr, unsigned long end, |
| 2922 | unsigned int flags, struct page **pages, int *nr) |
| 2923 | { |
| 2924 | } |
| 2925 | #endif /* CONFIG_HAVE_FAST_GUP */ |
| 2926 | |
| 2927 | #ifndef gup_fast_permitted |
| 2928 | /* |
| 2929 | * Check if it's allowed to use get_user_pages_fast_only() for the range, or |
| 2930 | * we need to fall back to the slow version: |
| 2931 | */ |
| 2932 | static bool gup_fast_permitted(unsigned long start, unsigned long end) |
| 2933 | { |
| 2934 | return true; |
| 2935 | } |
| 2936 | #endif |
| 2937 | |
| 2938 | static int __gup_longterm_unlocked(unsigned long start, int nr_pages, |
| 2939 | unsigned int gup_flags, struct page **pages) |
| 2940 | { |
| 2941 | int ret; |
| 2942 | |
| 2943 | /* |
| 2944 | * FIXME: FOLL_LONGTERM does not work with |
| 2945 | * get_user_pages_unlocked() (see comments in that function) |
| 2946 | */ |
| 2947 | if (gup_flags & FOLL_LONGTERM) { |
| 2948 | mmap_read_lock(current->mm); |
| 2949 | ret = __gup_longterm_locked(current->mm, |
| 2950 | start, nr_pages, |
| 2951 | pages, NULL, gup_flags); |
| 2952 | mmap_read_unlock(current->mm); |
| 2953 | } else { |
| 2954 | ret = get_user_pages_unlocked(start, nr_pages, |
| 2955 | pages, gup_flags); |
| 2956 | } |
| 2957 | |
| 2958 | return ret; |
| 2959 | } |
| 2960 | |
| 2961 | static unsigned long lockless_pages_from_mm(unsigned long start, |
| 2962 | unsigned long end, |
| 2963 | unsigned int gup_flags, |
| 2964 | struct page **pages) |
| 2965 | { |
| 2966 | unsigned long flags; |
| 2967 | int nr_pinned = 0; |
| 2968 | unsigned seq; |
| 2969 | |
| 2970 | if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) || |
| 2971 | !gup_fast_permitted(start, end)) |
| 2972 | return 0; |
| 2973 | |
| 2974 | if (gup_flags & FOLL_PIN) { |
| 2975 | seq = raw_read_seqcount(¤t->mm->write_protect_seq); |
| 2976 | if (seq & 1) |
| 2977 | return 0; |
| 2978 | } |
| 2979 | |
| 2980 | /* |
| 2981 | * Disable interrupts. The nested form is used, in order to allow full, |
| 2982 | * general purpose use of this routine. |
| 2983 | * |
| 2984 | * With interrupts disabled, we block page table pages from being freed |
| 2985 | * from under us. See struct mmu_table_batch comments in |
| 2986 | * include/asm-generic/tlb.h for more details. |
| 2987 | * |
| 2988 | * We do not adopt an rcu_read_lock() here as we also want to block IPIs |
| 2989 | * that come from THPs splitting. |
| 2990 | */ |
| 2991 | local_irq_save(flags); |
| 2992 | gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); |
| 2993 | local_irq_restore(flags); |
| 2994 | |
| 2995 | /* |
| 2996 |  * When pinning pages for DMA, there could be a concurrent write protect
| 2997 |  * from fork() via copy_page_range(); in this case, always fail fast GUP.
| 2998 | */ |
| 2999 | if (gup_flags & FOLL_PIN) { |
| 3000 | if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { |
| 3001 | unpin_user_pages_lockless(pages, nr_pinned); |
| 3002 | return 0; |
| 3003 | } else { |
| 3004 | sanity_check_pinned_pages(pages, nr_pinned); |
| 3005 | } |
| 3006 | } |
| 3007 | return nr_pinned; |
| 3008 | } |
| 3009 | |
| 3010 | static int internal_get_user_pages_fast(unsigned long start, |
| 3011 | unsigned long nr_pages, |
| 3012 | unsigned int gup_flags, |
| 3013 | struct page **pages) |
| 3014 | { |
| 3015 | unsigned long len, end; |
| 3016 | unsigned long nr_pinned; |
| 3017 | int ret; |
| 3018 | |
| 3019 | if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | |
| 3020 | FOLL_FORCE | FOLL_PIN | FOLL_GET | |
| 3021 | FOLL_FAST_ONLY | FOLL_NOFAULT))) |
| 3022 | return -EINVAL; |
| 3023 | |
| 3024 | if (gup_flags & FOLL_PIN) |
| 3025 | mm_set_has_pinned_flag(¤t->mm->flags); |
| 3026 | |
| 3027 | if (!(gup_flags & FOLL_FAST_ONLY)) |
| 3028 | might_lock_read(¤t->mm->mmap_lock); |
| 3029 | |
| 3030 | start = untagged_addr(start) & PAGE_MASK; |
| 3031 | len = nr_pages << PAGE_SHIFT; |
| 3032 | if (check_add_overflow(start, len, &end)) |
| 3033 | return 0; |
| 3034 | if (unlikely(!access_ok((void __user *)start, len))) |
| 3035 | return -EFAULT; |
| 3036 | |
| 3037 | nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); |
| 3038 | if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) |
| 3039 | return nr_pinned; |
| 3040 | |
| 3041 | /* Slow path: try to get the remaining pages with get_user_pages */ |
| 3042 | start += nr_pinned << PAGE_SHIFT; |
| 3043 | pages += nr_pinned; |
| 3044 | ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags, |
| 3045 | pages); |
| 3046 | if (ret < 0) { |
| 3047 | /* |
| 3048 | * The caller has to unpin the pages we already pinned so |
| 3049 | * returning -errno is not an option |
| 3050 | */ |
| 3051 | if (nr_pinned) |
| 3052 | return nr_pinned; |
| 3053 | return ret; |
| 3054 | } |
| 3055 | return ret + nr_pinned; |
| 3056 | } |
| 3057 | |
| 3058 | /** |
| 3059 | * get_user_pages_fast_only() - pin user pages in memory |
| 3060 | * @start: starting user address |
| 3061 | * @nr_pages: number of pages from start to pin |
| 3062 | * @gup_flags: flags modifying pin behaviour |
| 3063 | * @pages: array that receives pointers to the pages pinned. |
| 3064 | * Should be at least nr_pages long. |
| 3065 | * |
| 3066 | * Like get_user_pages_fast(), except that it is IRQ-safe in that it won't
| 3067 | * fall back to the regular ("slow") GUP path.
| 3068 | * Note a difference from get_user_pages_fast(): this always returns the
| 3069 | * number of pages pinned, or 0 if no pages were pinned.
| 3070 | * |
| 3071 | * If the architecture does not support this function, simply return with no |
| 3072 | * pages pinned. |
| 3073 | * |
| 3074 | * Careful, careful! COW breaking can go either way, so a non-write |
| 3075 | * access can get ambiguous page results. If you call this function without |
| 3076 | * 'write' set, you'd better be sure that you're ok with that ambiguity. |
| 3077 | */ |
| 3078 | int get_user_pages_fast_only(unsigned long start, int nr_pages, |
| 3079 | unsigned int gup_flags, struct page **pages) |
| 3080 | { |
| 3081 | int nr_pinned; |
| 3082 | /* |
| 3083 | * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, |
| 3084 | * because gup fast is always a "pin with a +1 page refcount" request. |
| 3085 | * |
| 3086 | * FOLL_FAST_ONLY is required in order to match the API description of |
| 3087 | * this routine: no fallback to regular ("slow") GUP.
| 3088 | */ |
| 3089 | gup_flags |= FOLL_GET | FOLL_FAST_ONLY; |
| 3090 | |
| 3091 | nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags, |
| 3092 | pages); |
| 3093 | |
| 3094 | /* |
| 3095 | * As specified in the API description above, this routine is not |
| 3096 | * allowed to return negative values. However, the common core |
| 3097 | * routine internal_get_user_pages_fast() *can* return -errno. |
| 3098 | * Therefore, correct for that here: |
| 3099 | */ |
| 3100 | if (nr_pinned < 0) |
| 3101 | nr_pinned = 0; |
| 3102 | |
| 3103 | return nr_pinned; |
| 3104 | } |
| 3105 | EXPORT_SYMBOL_GPL(get_user_pages_fast_only); |
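
/*
 * Hypothetical caller sketch (uaddr is an invented name): grabbing a single
 * page from a context that must not sleep or take mmap_lock. Because this is
 * a FOLL_GET-style reference, it is dropped with put_page(), not
 * unpin_user_page().
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page) != 1)
 *		return -EFAULT;		// defer to a sleeping path instead
 *	... access the page ...
 *	put_page(page);
 */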
| 3106 | |
| 3107 | /** |
| 3108 | * get_user_pages_fast() - pin user pages in memory |
| 3109 | * @start: starting user address |
| 3110 | * @nr_pages: number of pages from start to pin |
| 3111 | * @gup_flags: flags modifying pin behaviour |
| 3112 | * @pages: array that receives pointers to the pages pinned. |
| 3113 | * Should be at least nr_pages long. |
| 3114 | * |
| 3115 | * Attempt to pin user pages in memory without taking mm->mmap_lock. |
| 3116 | * If not successful, it will fall back to taking the lock and |
| 3117 | * calling get_user_pages(). |
| 3118 | * |
| 3119 | * Returns number of pages pinned. This may be fewer than the number requested. |
| 3120 | * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns |
| 3121 | * -errno. |
| 3122 | */ |
| 3123 | int get_user_pages_fast(unsigned long start, int nr_pages, |
| 3124 | unsigned int gup_flags, struct page **pages) |
| 3125 | { |
| 3126 | if (!is_valid_gup_flags(gup_flags)) |
| 3127 | return -EINVAL; |
| 3128 | |
| 3129 | /* |
| 3130 | * The caller may or may not have explicitly set FOLL_GET; either way is |
| 3131 | * OK. However, internally (within mm/gup.c), gup fast variants must set |
| 3132 | * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" |
| 3133 | * request. |
| 3134 | */ |
| 3135 | gup_flags |= FOLL_GET; |
| 3136 | return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); |
| 3137 | } |
| 3138 | EXPORT_SYMBOL_GPL(get_user_pages_fast); |
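
/*
 * Hypothetical caller sketch (uaddr and the array size are invented): fewer
 * pages than requested may come back, and the FOLL_GET references taken here
 * are dropped with put_page():
 *
 *	struct page *pages[4];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr, 4, FOLL_WRITE, pages);
 *	if (got < 0)
 *		return got;		// nothing was pinned
 *	... use pages[0] .. pages[got - 1] ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */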
| 3139 | |
| 3140 | /** |
| 3141 | * pin_user_pages_fast() - pin user pages in memory without taking locks |
| 3142 | * |
| 3143 | * @start: starting user address |
| 3144 | * @nr_pages: number of pages from start to pin |
| 3145 | * @gup_flags: flags modifying pin behaviour |
| 3146 | * @pages: array that receives pointers to the pages pinned. |
| 3147 | * Should be at least nr_pages long. |
| 3148 | * |
| 3149 | * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See |
| 3150 | * get_user_pages_fast() for documentation on the function arguments, because |
| 3151 | * the arguments here are identical. |
| 3152 | * |
| 3153 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please |
| 3154 | * see Documentation/core-api/pin_user_pages.rst for further details. |
| 3155 | */ |
| 3156 | int pin_user_pages_fast(unsigned long start, int nr_pages, |
| 3157 | unsigned int gup_flags, struct page **pages) |
| 3158 | { |
| 3159 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
| 3160 | if (WARN_ON_ONCE(gup_flags & FOLL_GET)) |
| 3161 | return -EINVAL; |
| 3162 | |
| 3163 | if (WARN_ON_ONCE(!pages)) |
| 3164 | return -EINVAL; |
| 3165 | |
| 3166 | gup_flags |= FOLL_PIN; |
| 3167 | return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); |
| 3168 | } |
| 3169 | EXPORT_SYMBOL_GPL(pin_user_pages_fast); |
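
/*
 * Hypothetical caller sketch for a DMA-style user (uaddr is an invented
 * name): pin with FOLL_PIN (plus FOLL_LONGTERM if the pin may be held
 * indefinitely) and release through the unpin_user_pages*() family, never
 * put_page():
 *
 *	struct page *pages[16];
 *	int nr;
 *
 *	nr = pin_user_pages_fast(uaddr, 16, FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (nr <= 0)
 *		return nr ? nr : -EFAULT;
 *	... program the device to DMA into the pinned pages ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);	// mark dirty and unpin
 */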
| 3170 | |
| 3171 | /* |
| 3172 | * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior |
| 3173 | * is the same, except that this one sets FOLL_PIN instead of FOLL_GET. |
| 3174 | * |
| 3175 | * The API rules are the same, too: no negative values may be returned. |
| 3176 | */ |
| 3177 | int pin_user_pages_fast_only(unsigned long start, int nr_pages, |
| 3178 | unsigned int gup_flags, struct page **pages) |
| 3179 | { |
| 3180 | int nr_pinned; |
| 3181 | |
| 3182 | /* |
| 3183 | * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API |
| 3184 | * rules require returning 0, rather than -errno: |
| 3185 | */ |
| 3186 | if (WARN_ON_ONCE(gup_flags & FOLL_GET)) |
| 3187 | return 0; |
| 3188 | |
| 3189 | if (WARN_ON_ONCE(!pages)) |
| 3190 | return 0; |
| 3191 | /* |
| 3192 | * FOLL_FAST_ONLY is required in order to match the API description of |
| 3193 | * this routine: no fallback to regular ("slow") GUP.
| 3194 | */ |
| 3195 | gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY); |
| 3196 | nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags, |
| 3197 | pages); |
| 3198 | /* |
| 3199 | * This routine is not allowed to return negative values. However, |
| 3200 | * internal_get_user_pages_fast() *can* return -errno. Therefore, |
| 3201 | * correct for that here: |
| 3202 | */ |
| 3203 | if (nr_pinned < 0) |
| 3204 | nr_pinned = 0; |
| 3205 | |
| 3206 | return nr_pinned; |
| 3207 | } |
| 3208 | EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); |
| 3209 | |
| 3210 | /** |
| 3211 | * pin_user_pages_remote() - pin pages of a remote process |
| 3212 | * |
| 3213 | * @mm: mm_struct of target mm |
| 3214 | * @start: starting user address |
| 3215 | * @nr_pages: number of pages from start to pin |
| 3216 | * @gup_flags: flags modifying lookup behaviour |
| 3217 | * @pages: array that receives pointers to the pages pinned. |
| 3218 | * Should be at least nr_pages long. |
| 3219 | * @vmas: array of pointers to vmas corresponding to each page. |
| 3220 | * Or NULL if the caller does not require them. |
| 3221 | * @locked: pointer to lock flag indicating whether lock is held and |
| 3222 | * subsequently whether VM_FAULT_RETRY functionality can be |
| 3223 | * utilised. Lock must initially be held. |
| 3224 | * |
| 3225 | * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See |
| 3226 | * get_user_pages_remote() for documentation on the function arguments, because |
| 3227 | * the arguments here are identical. |
| 3228 | * |
| 3229 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please |
| 3230 | * see Documentation/core-api/pin_user_pages.rst for details. |
| 3231 | */ |
| 3232 | long pin_user_pages_remote(struct mm_struct *mm, |
| 3233 | unsigned long start, unsigned long nr_pages, |
| 3234 | unsigned int gup_flags, struct page **pages, |
| 3235 | struct vm_area_struct **vmas, int *locked) |
| 3236 | { |
| 3237 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
| 3238 | if (WARN_ON_ONCE(gup_flags & FOLL_GET)) |
| 3239 | return -EINVAL; |
| 3240 | |
| 3241 | if (WARN_ON_ONCE(!pages)) |
| 3242 | return -EINVAL; |
| 3243 | |
| 3244 | gup_flags |= FOLL_PIN; |
| 3245 | return __get_user_pages_remote(mm, start, nr_pages, gup_flags, |
| 3246 | pages, vmas, locked); |
| 3247 | } |
| 3248 | EXPORT_SYMBOL(pin_user_pages_remote); |
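
/*
 * Hypothetical caller sketch (mm and vaddr are invented names): pinning one
 * page in another process's address space. The mmap_lock must be held on
 * entry; with a non-NULL @locked the callee may drop the lock and report that
 * through *locked, so the unlock has to be conditional:
 *
 *	struct page *page;
 *	int locked = 1;
 *	long got;
 *
 *	mmap_read_lock(mm);
 *	got = pin_user_pages_remote(mm, vaddr, 1, FOLL_WRITE, &page, NULL,
 *				    &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (got != 1)
 *		return got < 0 ? got : -EFAULT;
 *	... access the page on behalf of the remote task ...
 *	unpin_user_page(page);
 */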
| 3249 | |
| 3250 | /** |
| 3251 | * pin_user_pages() - pin user pages in memory for use by other devices |
| 3252 | * |
| 3253 | * @start: starting user address |
| 3254 | * @nr_pages: number of pages from start to pin |
| 3255 | * @gup_flags: flags modifying lookup behaviour |
| 3256 | * @pages: array that receives pointers to the pages pinned. |
| 3257 | * Should be at least nr_pages long. |
| 3258 | * @vmas: array of pointers to vmas corresponding to each page. |
| 3259 | * Or NULL if the caller does not require them. |
| 3260 | * |
| 3261 | * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and |
| 3262 | * FOLL_PIN is set. |
| 3263 | * |
| 3264 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please |
| 3265 | * see Documentation/core-api/pin_user_pages.rst for details. |
| 3266 | */ |
| 3267 | long pin_user_pages(unsigned long start, unsigned long nr_pages, |
| 3268 | unsigned int gup_flags, struct page **pages, |
| 3269 | struct vm_area_struct **vmas) |
| 3270 | { |
| 3271 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
| 3272 | if (WARN_ON_ONCE(gup_flags & FOLL_GET)) |
| 3273 | return -EINVAL; |
| 3274 | |
| 3275 | if (WARN_ON_ONCE(!pages)) |
| 3276 | return -EINVAL; |
| 3277 | |
| 3278 | gup_flags |= FOLL_PIN; |
| 3279 | return __gup_longterm_locked(current->mm, start, nr_pages, |
| 3280 | pages, vmas, gup_flags); |
| 3281 | } |
| 3282 | EXPORT_SYMBOL(pin_user_pages); |
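
/*
 * Hypothetical caller sketch in the style of long-term buffer registration
 * (ubuf, npages and pages are invented names); with this calling convention
 * the caller is expected to hold mmap_lock for read across the call:
 *
 *	long got;
 *
 *	mmap_read_lock(current->mm);
 *	got = pin_user_pages(ubuf, npages, FOLL_WRITE | FOLL_LONGTERM,
 *			     pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *	... use the pinned buffer ...
 *	unpin_user_pages(pages, got);
 */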
| 3283 | |
| 3284 | /* |
| 3285 | * pin_user_pages_unlocked() is the FOLL_PIN variant of |
| 3286 | * get_user_pages_unlocked(). Behavior is the same, except that this one sets |
| 3287 | * FOLL_PIN and rejects FOLL_GET. |
| 3288 | */ |
| 3289 | long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 3290 | struct page **pages, unsigned int gup_flags) |
| 3291 | { |
| 3292 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ |
| 3293 | if (WARN_ON_ONCE(gup_flags & FOLL_GET)) |
| 3294 | return -EINVAL; |
| 3295 | |
| 3296 | if (WARN_ON_ONCE(!pages)) |
| 3297 | return -EINVAL; |
| 3298 | |
| 3299 | gup_flags |= FOLL_PIN; |
| 3300 | return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); |
| 3301 | } |
| 3302 | EXPORT_SYMBOL(pin_user_pages_unlocked); |
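
/*
 * Hypothetical caller sketch (uaddr, npages and pages are invented names):
 * the "unlocked" variant is for callers that do not already hold mmap_lock;
 * the lock is taken and released internally. Note the argument order: @pages
 * comes before @gup_flags here.
 *
 *	long got;
 *
 *	got = pin_user_pages_unlocked(uaddr, npages, pages, FOLL_WRITE);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *	... use the pinned pages ...
 *	unpin_user_pages(pages, got);
 */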