// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static inline void sanity_check_pinned_pages(struct page **pages,
					     unsigned long npages)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	/*
	 * We only pin anonymous pages if they are exclusive. Once pinned, we
	 * can no longer turn them possibly shared and PageAnonExclusive() will
	 * stick around until the page is freed.
	 *
	 * We'd like to verify that our pinned anonymous pages are still mapped
	 * exclusively. The issue with anon THP is that we don't know how
	 * they are/were mapped when pinning them. However, for anon
	 * THP we can assume that either the given page (PTE-mapped THP) or
	 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
	 * neither is the case, there is certainly something wrong.
	 */
	for (; npages; npages--, pages++) {
		struct page *page = *pages;
		struct folio *folio = page_folio(page);

		if (!folio_test_anon(folio))
			continue;
		if (!folio_test_large(folio) || folio_test_hugetlb(folio))
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
		else
			/* Either a PTE-mapped or a PMD-mapped THP. */
			VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
				       !PageAnonExclusive(page), page);
	}
}

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
	struct folio *folio;

retry:
	folio = page_folio(page);
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the folio; but it
	 * could be that between calling page_folio() and the refcount
	 * increment, the folio was split, in which case we'd end up
	 * holding a reference on a folio that has nothing to do with the page
	 * we were given anymore.
	 * So now that the folio is stable, recheck that the page still
	 * belongs to this folio.
	 */
	if (unlikely(page_folio(page) != folio)) {
		if (!put_devmap_managed_page_refs(&folio->page, refs))
			folio_put_refs(folio, refs);
		goto retry;
	}

	return folio;
}

/**
 * try_grab_folio() - Attempt to get or pin a folio.
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount."
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its compound_pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
{
	if (flags & FOLL_GET)
		return try_get_folio(page, refs);
	else if (flags & FOLL_PIN) {
		struct folio *folio;

		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
		 * right zone, so fail and let the caller fall back to the slow
		 * path.
		 */
		if (unlikely((flags & FOLL_LONGTERM) &&
			     !is_longterm_pinnable_page(page)))
			return NULL;

		/*
		 * CAUTION: Don't use compound_head() on the page before this
		 * point, the result won't be stable.
		 */
		folio = try_get_folio(page, refs);
		if (!folio)
			return NULL;

		/*
		 * When pinning a large folio, use an exact count to track it.
		 *
		 * However, be sure to *also* increment the normal folio
		 * refcount field at least once, so that the folio really
		 * is pinned.  That's why the refcount from the earlier
		 * try_get_folio() is left intact.
		 */
		if (folio_test_large(folio))
			atomic_add(refs, folio_pincount_ptr(folio));
		else
			folio_ref_add(folio,
					refs * (GUP_PIN_COUNTING_BIAS - 1));
		/*
		 * Adjust the pincount before re-checking the PTE for changes.
		 * This is essentially a smp_mb() and is paired with a memory
		 * barrier in page_try_share_anon_rmap().
		 */
		smp_mb__after_atomic();

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

		return folio;
	}

	WARN_ON_ONCE(1);
	return NULL;
}
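
/*
 * Editorial worked example (not in the original source), assuming
 * GUP_PIN_COUNTING_BIAS is still (1U << 10) == 1024: a call like
 * try_grab_folio(page, 1, FOLL_PIN) on a single-page folio whose
 * refcount is 1 leaves the refcount at 1 + 1024 = 1025 (one reference
 * from try_get_folio() plus 1 * (1024 - 1) from folio_ref_add()).
 * That bias is what later lets a pin be inferred from the refcount
 * alone.  For a large folio, the same call leaves the refcount at 2
 * and instead bumps the dedicated pincount field by 1.
 */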

static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
		if (folio_test_large(folio))
			atomic_sub(refs, folio_pincount_ptr(folio));
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (!put_devmap_managed_page_refs(&folio->page, refs))
		folio_put_refs(folio, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 * @page:  pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * This might not do anything at all, depending on the flags argument.
 *
198 * "grab" names in this file mean, "look at flags to decide whether to use
199 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_folio() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	struct folio *folio = page_folio(page);

	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
		return false;

	if (flags & FOLL_GET)
		folio_ref_inc(folio);
	else if (flags & FOLL_PIN) {
		/*
		 * Similar to try_grab_folio(): be sure to *also*
		 * increment the normal page refcount field at least once,
		 * so that the page really is pinned.
		 */
		if (folio_test_large(folio)) {
			folio_ref_add(folio, 1);
			atomic_add(1, folio_pincount_ptr(folio));
		} else {
			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
		}

		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
	}

	return true;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	sanity_check_pinned_pages(&page, 1);
	gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

static inline struct folio *gup_folio_range_next(struct page *start,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct page *next = nth_page(start, i);
	struct folio *folio = page_folio(next);
	unsigned int nr = 1;

	if (folio_test_large(folio))
		nr = min_t(unsigned int, npages - i,
			   folio_nr_pages(folio) - folio_page_idx(folio, next));

	*ntails = nr;
	return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}
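
/*
 * Editorial note (not in the original source): if list[] holds
 * { A0, A1, A2, B0 }, where A0..A2 belong to one folio A, then
 * gup_folio_next(list, 4, 0, &nr) returns folio A with nr == 3, and
 * the following call at i == 3 returns folio B with nr == 1.  This
 * batching is what lets the unpin loops below drop several page
 * references with a single gup_put_folio() call.
 */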

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_range_next(page, npages, i, &nr);
		if (make_dirty && !folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * Don't perform any sanity checks because we might have raced with
	 * fork() and some anonymous pages might now actually be shared --
	 * which is why we're unpinning after all.
	 */
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	sanity_check_pinned_pages(pages, npages);
	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages);

/*
 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
 * cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
	if (!test_bit(MMF_HAS_PINNED, mm_flags))
		set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
static inline bool can_follow_write_pte(pte_t pte, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pte is writable, we can write to the page. */
	if (pte_write(pte))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;
	return !userfaultfd_pte_wp(vma, pte);
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
		goto no_page;

	page = vm_normal_page(vma, address, pte);

	/*
	 * We only care about anon pages in can_follow_write_pte() and don't
	 * have to worry about pte_devmap() because they are never anon.
	 */
	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pte(pte, page, vma, flags)) {
		page = NULL;
		goto out;
	}

	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (!pte_write(pte) && gup_must_unshare(flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
		       !PageAnonExclusive(page), page);

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
retry:
	if (!pmd_present(pmdval)) {
		/*
		 * Should never reach here, if thp migration is not supported;
		 * Otherwise, it must be a thp migration entry.
		 */
		VM_BUG_ON(!thp_migration_supported() ||
			  !is_pmd_migration_entry(pmdval));

		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);

		pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_lock is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT_PMD) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * When getting an anonymous page and the caller has to trigger unsharing
 * of a shared anonymous page first, -EMLINK is returned. The caller should
 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
 * relevant with FOLL_PIN and !FOLL_WRITE.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/*
	 * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
	 * special hugetlb page table walking code.  This eliminates the
	 * need to check for hugetlb entries in the general walking code.
	 *
	 * hugetlb_follow_page_mask is only for follow_page() handling here.
	 * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
	 */
	if (is_vm_hugetlb_page(vma)) {
		page = hugetlb_follow_page_mask(vma, address, flags);
		if (!page)
			page = no_page_table(vma, flags);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	if (vma_is_secretmem(vma))
		return NULL;

	if (foll_flags & FOLL_PIN)
		return NULL;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
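
/*
 * Editorial usage sketch (not in the original source): follow_page()
 * is for in-kernel walkers that already hold the mmap_lock, e.g.:
 *
 *	mmap_read_lock(mm);
 *	page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		// ... inspect the page ...
 *		put_page(page);
 *	}
 *	mmap_read_unlock(mm);
 *
 * Note the FOLL_PIN rejection above: pinning must go through the
 * pin_user_pages*() family instead.
 */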

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	if (unlikely(!try_grab_page(*page, gup_flags))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry.  If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released.  If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, bool unshare,
		int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (*flags & FOLL_NOFAULT)
		return -EFAULT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (locked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}
	if (unshare) {
		fault_flags |= FAULT_FLAG_UNSHARE;
		/* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
		VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);

	if (ret & VM_FAULT_COMPLETED) {
		/*
		 * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
		 * mmap lock in the page fault handler. Sanity check this.
		 */
		WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
		if (locked)
			*locked = 0;
		/*
		 * We should do the same as VM_FAULT_RETRY, but let's not
		 * return -EBUSY since that's not reflecting the reality of
		 * what has happened - we've just fully completed a page
		 * fault, with the mmap lock released.  Use -EAGAIN to show
		 * that we want to take the mmap lock _again_.
		 */
		return -EAGAIN;
	}

	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if (vma_is_secretmem(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently refaulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read().  That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;

			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, locked);
				if (locked && *locked == 0) {
					/*
					 * We've got a VM_FAULT_RETRY
					 * and we've lost mmap_lock.
					 * We must stop here.
					 */
					BUG_ON(gup_flags & FOLL_NOWAIT);
					goto out;
				}
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			ret = faultin_page(vma, start, &foll_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}

			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}
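
/*
 * Editorial caller-side sketch (not in the original source): most code
 * reaches the walker above through the public wrappers, e.g.:
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page) == 1) {
 *		// ... access the page, e.g. via kmap_local_page() ...
 *		put_page(page);
 *	}
 *
 * get_user_pages_fast() falls back to this slow path when its
 * IRQ-disabled, lockless walk cannot resolve the address.
 */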

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}
adc8cb40 1253/**
4bbd4c77 1254 * fixup_user_fault() - manually resolve a user page fault
4bbd4c77
KS
1255 * @mm: mm_struct of target mm
1256 * @address: user address
1257 * @fault_flags:flags to pass down to handle_mm_fault()
c1e8d7c6 1258 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
548b6a1e
MC
1259 * does not allow retry. If NULL, the caller must guarantee
1260 * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
4bbd4c77
KS
1261 *
1262 * This is meant to be called in the specific scenario where for locking reasons
1263 * we try to access user memory in atomic context (within a pagefault_disable()
1264 * section), this returns -EFAULT, and we want to resolve the user fault before
1265 * trying again.
1266 *
1267 * Typically this is meant to be used by the futex code.
1268 *
1269 * The main difference with get_user_pages() is that this function will
1270 * unconditionally call handle_mm_fault() which will in turn perform all the
1271 * necessary SW fixup of the dirty and young bits in the PTE, while
4a9e1cda 1272 * get_user_pages() only guarantees to update these in the struct page.
4bbd4c77
KS
1273 *
1274 * This is important for some architectures where those bits also gate the
1275 * access permission to the page because they are maintained in software. On
1276 * such architectures, gup() will not be enough to make a subsequent access
1277 * succeed.
1278 *
c1e8d7c6
ML
1279 * This function will not return with an unlocked mmap_lock. So it has not the
1280 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
4bbd4c77 1281 */
64019a2e 1282int fixup_user_fault(struct mm_struct *mm,
4a9e1cda
DD
1283 unsigned long address, unsigned int fault_flags,
1284 bool *unlocked)
4bbd4c77
KS
1285{
1286 struct vm_area_struct *vma;
8fed2f3c 1287 vm_fault_t ret;
4a9e1cda 1288
f9652594
AK
1289 address = untagged_addr(address);
1290
4a9e1cda 1291 if (unlocked)
71335f37 1292 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
4bbd4c77 1293
4a9e1cda 1294retry:
4bbd4c77
KS
1295 vma = find_extend_vma(mm, address);
1296 if (!vma || address < vma->vm_start)
1297 return -EFAULT;
1298
d4925e00 1299 if (!vma_permits_fault(vma, fault_flags))
4bbd4c77
KS
1300 return -EFAULT;
1301
475f4dfc
PX
1302 if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1303 fatal_signal_pending(current))
1304 return -EINTR;
1305
bce617ed 1306 ret = handle_mm_fault(vma, address, fault_flags, NULL);
d9272525
PX
1307
1308 if (ret & VM_FAULT_COMPLETED) {
1309 /*
1310 * NOTE: it's a pity that we need to retake the lock here
1311 * to pair with the unlock() in the callers. Ideally we
1312 * could tell the callers so they do not need to unlock.
1313 */
1314 mmap_read_lock(mm);
1315 *unlocked = true;
1316 return 0;
1317 }
1318
4bbd4c77 1319 if (ret & VM_FAULT_ERROR) {
9a291a7c
JM
1320 int err = vm_fault_to_errno(ret, 0);
1321
1322 if (err)
1323 return err;
4bbd4c77
KS
1324 BUG();
1325 }
4a9e1cda
DD
1326
1327 if (ret & VM_FAULT_RETRY) {
d8ed45c5 1328 mmap_read_lock(mm);
475f4dfc
PX
1329 *unlocked = true;
1330 fault_flags |= FAULT_FLAG_TRIED;
1331 goto retry;
4a9e1cda
DD
1332 }
1333
4bbd4c77
KS
1334 return 0;
1335}
add6a0cd 1336EXPORT_SYMBOL_GPL(fixup_user_fault);

/*
 * Please note that this function, unlike __get_user_pages will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;
		lock_dropped = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
		 * by fatal signals, so we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */

		if (fatal_signal_pending(current)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;
	long ret;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	mmap_assert_locked(mm);

	/*
	 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
	 * faultin_page() to break COW, so it has no work to do here.
	 */
	if (vma->vm_flags & VM_LOCKONFAULT)
		return nr_pages;

	gup_flags = FOLL_TOUCH;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma_is_accessible(vma))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	ret = __get_user_pages(mm, start, nr_pages, gup_flags,
				NULL, NULL, locked);
	lru_add_drain();
	return ret;
}

/*
 * faultin_vma_page_range() - populate (prefault) page tables inside the
 *			      given VMA range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either number of processed pages in the vma, or a negative error
 * code on error (see __get_user_pages()).
 *
 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
 * covered by the VMA.
 *
 * If @locked is NULL, it may be held for read or write and will be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be released. If
 * it's released, *@locked will be set to 0.
1559 */
1560long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1561 unsigned long end, bool write, int *locked)
1562{
1563 struct mm_struct *mm = vma->vm_mm;
1564 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1565 int gup_flags;
ece369c7 1566 long ret;
4ca9b385
DH
1567
1568 VM_BUG_ON(!PAGE_ALIGNED(start));
1569 VM_BUG_ON(!PAGE_ALIGNED(end));
1570 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1571 VM_BUG_ON_VMA(end > vma->vm_end, vma);
1572 mmap_assert_locked(mm);
1573
1574 /*
1575 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1576 * the page dirty with FOLL_WRITE -- which doesn't make a
1577 * difference with !FOLL_FORCE, because the page is writable
1578 * in the page table.
1579 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1580 * a poisoned page.
4ca9b385
DH
1581 * !FOLL_FORCE: Require proper access permissions.
1582 */
b67bf49c 1583 gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
4ca9b385
DH
1584 if (write)
1585 gup_flags |= FOLL_WRITE;
1586
1587 /*
eb2faa51
DH
1588 * We want to report -EINVAL instead of -EFAULT for any permission
1589 * problems or incompatible mappings.
4ca9b385 1590 */
eb2faa51
DH
1591 if (check_vma_flags(vma, gup_flags))
1592 return -EINVAL;
1593
ece369c7 1594 ret = __get_user_pages(mm, start, nr_pages, gup_flags,
4ca9b385 1595 NULL, NULL, locked);
ece369c7
HD
1596 lru_add_drain();
1597 return ret;
4ca9b385
DH
1598}
1599
d3649f68
CH
1600/*
1601 * __mm_populate - populate and/or mlock pages within a range of address space.
1602 *
1603 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1604 * flags. VMAs must be already marked with the desired vm_flags, and
c1e8d7c6 1605 * mmap_lock must not be held.
d3649f68
CH
1606 */
1607int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1608{
1609 struct mm_struct *mm = current->mm;
1610 unsigned long end, nstart, nend;
1611 struct vm_area_struct *vma = NULL;
1612 int locked = 0;
1613 long ret = 0;
1614
1615 end = start + len;
1616
1617 for (nstart = start; nstart < end; nstart = nend) {
1618 /*
1619 * We want to fault in pages for [nstart; end) address range.
1620 * Find first corresponding VMA.
1621 */
1622 if (!locked) {
1623 locked = 1;
d8ed45c5 1624 mmap_read_lock(mm);
c4d1a92d 1625 vma = find_vma_intersection(mm, nstart, end);
d3649f68 1626 } else if (nstart >= vma->vm_end)
c4d1a92d
LH
1627 vma = find_vma_intersection(mm, vma->vm_end, end);
1628
1629 if (!vma)
d3649f68
CH
1630 break;
1631 /*
1632 * Set [nstart; nend) to the intersection of the desired address
1633 * range with the first VMA. Also, skip undesirable VMA types.
1634 */
1635 nend = min(end, vma->vm_end);
1636 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1637 continue;
1638 if (nstart < vma->vm_start)
1639 nstart = vma->vm_start;
1640 /*
1641 * Now fault in a range of pages. populate_vma_page_range()
1642 * double checks the vma flags, so that it won't mlock pages
1643 * if the vma was already munlocked.
1644 */
1645 ret = populate_vma_page_range(vma, nstart, nend, &locked);
1646 if (ret < 0) {
1647 if (ignore_errors) {
1648 ret = 0;
1649 continue; /* continue at next VMA */
1650 }
1651 break;
1652 }
1653 nend = nstart + ret * PAGE_SIZE;
1654 ret = 0;
1655 }
1656 if (locked)
d8ed45c5 1657 mmap_read_unlock(mm);
d3649f68
CH
1658 return ret; /* 0 or negative error code */
1659}
050a9adc 1660#else /* CONFIG_MMU */
64019a2e 1661static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
050a9adc
CH
1662 unsigned long nr_pages, struct page **pages,
1663 struct vm_area_struct **vmas, int *locked,
1664 unsigned int foll_flags)
1665{
1666 struct vm_area_struct *vma;
1667 unsigned long vm_flags;
24dc20c7 1668 long i;
050a9adc
CH
1669
1670 /* calculate required read or write permissions.
1671 * If FOLL_FORCE is set, we only require the "MAY" flags.
1672 */
1673 vm_flags = (foll_flags & FOLL_WRITE) ?
1674 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1675 vm_flags &= (foll_flags & FOLL_FORCE) ?
1676 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1677
1678 for (i = 0; i < nr_pages; i++) {
1679 vma = find_vma(mm, start);
1680 if (!vma)
1681 goto finish_or_fault;
1682
1683 /* protect what we can, including chardevs */
1684 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1685 !(vm_flags & vma->vm_flags))
1686 goto finish_or_fault;
1687
1688 if (pages) {
396a400b 1689 pages[i] = virt_to_page((void *)start);
050a9adc
CH
1690 if (pages[i])
1691 get_page(pages[i]);
1692 }
1693 if (vmas)
1694 vmas[i] = vma;
1695 start = (start + PAGE_SIZE) & PAGE_MASK;
1696 }
1697
1698 return i;
1699
1700finish_or_fault:
1701 return i ? : -EFAULT;
1702}
1703#endif /* !CONFIG_MMU */
d3649f68 1704
bb523b40
AG
1705/**
1706 * fault_in_writeable - fault in userspace address range for writing
1707 * @uaddr: start of address range
1708 * @size: size of address range
1709 *
1710 * Returns the number of bytes not faulted in (like copy_to_user() and
1711 * copy_from_user()).
1712 */
1713size_t fault_in_writeable(char __user *uaddr, size_t size)
1714{
1715 char __user *start = uaddr, *end;
1716
1717 if (unlikely(size == 0))
1718 return 0;
677b2a8c
CL
1719 if (!user_write_access_begin(uaddr, size))
1720 return size;
bb523b40 1721 if (!PAGE_ALIGNED(uaddr)) {
677b2a8c 1722 unsafe_put_user(0, uaddr, out);
bb523b40
AG
1723 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1724 }
1725 end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1726 if (unlikely(end < start))
1727 end = NULL;
1728 while (uaddr != end) {
677b2a8c 1729 unsafe_put_user(0, uaddr, out);
bb523b40
AG
1730 uaddr += PAGE_SIZE;
1731 }
1732
1733out:
677b2a8c 1734 user_write_access_end();
bb523b40
AG
1735 if (size > uaddr - start)
1736 return size - (uaddr - start);
1737 return 0;
1738}
1739EXPORT_SYMBOL(fault_in_writeable);
1740
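/*
 * Example (illustration only, not part of gup.c): the canonical caller
 * pattern around fault_in_writeable().  A hedged sketch -- the helper
 * example_copy_out() is hypothetical.  The shape matters when the copy
 * must make forward progress (e.g. it runs with locks held that forbid
 * faulting): prefault, retry, and give up only when faulting makes no
 * progress either.  See fault_in_subpage_writeable() below for a
 * sub-page permission caveat with this loop.
 */
static int example_copy_out(char __user *dst, const char *src, size_t len)
{
	size_t left;

	for (;;) {
		left = copy_to_user(dst, src, len);	/* bytes NOT copied */
		if (!left)
			return 0;
		dst += len - left;
		src += len - left;
		len = left;
		/* Prefault the remainder; fail if nothing could be faulted in. */
		if (fault_in_writeable(dst, len) == len)
			return -EFAULT;
	}
}
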
da32b581
CM
1741/**
1742 * fault_in_subpage_writeable - fault in an address range for writing
1743 * @uaddr: start of address range
1744 * @size: size of address range
1745 *
1746 * Fault in a user address range for writing while checking for permissions at
1747 * sub-page granularity (e.g. arm64 MTE). This function should be used when
1748 * the caller cannot guarantee forward progress of a copy_to_user() loop.
1749 *
1750 * Returns the number of bytes not faulted in (like copy_to_user() and
1751 * copy_from_user()).
1752 */
1753size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
1754{
1755 size_t faulted_in;
1756
1757 /*
1758 * Attempt faulting in at page granularity first for page table
1759 * permission checking. The arch-specific probe_subpage_writeable()
1760 * functions may not check for this.
1761 */
1762 faulted_in = size - fault_in_writeable(uaddr, size);
1763 if (faulted_in)
1764 faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
1765
1766 return size - faulted_in;
1767}
1768EXPORT_SYMBOL(fault_in_subpage_writeable);
1769
cdd591fc
AG
1770/*
1771 * fault_in_safe_writeable - fault in an address range for writing
1772 * @uaddr: start of address range
1773 * @size: length of address range
1774 *
fe673d3f
LT
1775 * Faults in an address range for writing. This is primarily useful when we
1776 * already know that some or all of the pages in the address range aren't in
1777 * memory.
cdd591fc 1778 *
fe673d3f 1779 * Unlike fault_in_writeable(), this function is non-destructive.
cdd591fc
AG
1780 *
1781 * Note that we don't pin or otherwise hold the pages we fault in.
1782 * There's no guarantee that they'll stay in memory for any duration
1783 * of time.
1784 *
1785 * Returns the number of bytes not faulted in, like copy_to_user() and
1786 * copy_from_user().
1787 */
1788size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1789{
fe673d3f 1790 unsigned long start = (unsigned long)uaddr, end;
cdd591fc 1791 struct mm_struct *mm = current->mm;
fe673d3f 1792 bool unlocked = false;
cdd591fc 1793
fe673d3f
LT
1794 if (unlikely(size == 0))
1795 return 0;
cdd591fc 1796 end = PAGE_ALIGN(start + size);
fe673d3f 1797 if (end < start)
cdd591fc 1798 end = 0;
cdd591fc 1799
fe673d3f
LT
1800 mmap_read_lock(mm);
1801 do {
1802 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
cdd591fc 1803 break;
fe673d3f
LT
1804 start = (start + PAGE_SIZE) & PAGE_MASK;
1805 } while (start != end);
1806 mmap_read_unlock(mm);
1807
1808 if (size > (unsigned long)uaddr - start)
1809 return size - ((unsigned long)uaddr - start);
1810 return 0;
cdd591fc
AG
1811}
1812EXPORT_SYMBOL(fault_in_safe_writeable);
1813
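/*
 * Example (illustration only): a hedged sketch of when the "safe"
 * variant matters; example_prepare_read() is hypothetical.  Because
 * fault_in_writeable() writes a zero byte into each page, it would
 * corrupt a destination buffer that is only partially overwritten
 * later; fault_in_safe_writeable() merely triggers write faults.
 */
static int example_prepare_read(char __user *buf, size_t len)
{
	/* Prefault without clobbering whatever userspace already has in @buf. */
	if (fault_in_safe_writeable(buf, len) == len)
		return -EFAULT;

	return 0;	/* now attempt the real copy-out */
}
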
bb523b40
AG
1814/**
1815 * fault_in_readable - fault in userspace address range for reading
1816 * @uaddr: start of user address range
1817 * @size: size of user address range
1818 *
1819 * Returns the number of bytes not faulted in (like copy_to_user() and
1820 * copy_from_user()).
1821 */
1822size_t fault_in_readable(const char __user *uaddr, size_t size)
1823{
1824 const char __user *start = uaddr, *end;
1825 volatile char c;
1826
1827 if (unlikely(size == 0))
1828 return 0;
677b2a8c
CL
1829 if (!user_read_access_begin(uaddr, size))
1830 return size;
bb523b40 1831 if (!PAGE_ALIGNED(uaddr)) {
677b2a8c 1832 unsafe_get_user(c, uaddr, out);
bb523b40
AG
1833 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1834 }
1835 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1836 if (unlikely(end < start))
1837 end = NULL;
1838 while (uaddr != end) {
677b2a8c 1839 unsafe_get_user(c, uaddr, out);
bb523b40
AG
1840 uaddr += PAGE_SIZE;
1841 }
1842
1843out:
677b2a8c 1844 user_read_access_end();
bb523b40
AG
1845 (void)c;
1846 if (size > uaddr - start)
1847 return size - (uaddr - start);
1848 return 0;
1849}
1850EXPORT_SYMBOL(fault_in_readable);
1851
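/*
 * Example (illustration only): the read-side mirror of the
 * fault_in_writeable() loop above, for copying *from* userspace.
 * Hedged sketch; example_copy_in() is hypothetical.
 */
static int example_copy_in(void *dst, const char __user *src, size_t len)
{
	size_t left;

	for (;;) {
		left = copy_from_user(dst, src, len);	/* bytes NOT copied */
		if (!left)
			return 0;
		dst += len - left;
		src += len - left;
		len = left;
		if (fault_in_readable(src, len) == len)
			return -EFAULT;
	}
}
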
8f942eea
JH
1852/**
1853 * get_dump_page() - pin user page in memory while writing it to core dump
1854 * @addr: user address
1855 *
1856 * Returns struct page pointer of user page pinned for dump,
1857 * to be freed afterwards by put_page().
1858 *
1859 * Returns NULL on any kind of failure - a hole must then be inserted into
1860 * the corefile, to preserve alignment with its headers; and also returns
1861 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
f0953a1b 1862 * allowing a hole to be left in the corefile to save disk space.
8f942eea 1863 *
7f3bfab5 1864 * Called without mmap_lock (takes and releases the mmap_lock by itself).
8f942eea
JH
1865 */
1866#ifdef CONFIG_ELF_CORE
1867struct page *get_dump_page(unsigned long addr)
1868{
7f3bfab5 1869 struct mm_struct *mm = current->mm;
8f942eea 1870 struct page *page;
7f3bfab5
JH
1871 int locked = 1;
1872 int ret;
8f942eea 1873
7f3bfab5 1874 if (mmap_read_lock_killable(mm))
8f942eea 1875 return NULL;
7f3bfab5
JH
1876 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1877 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1878 if (locked)
1879 mmap_read_unlock(mm);
1880 return (ret == 1) ? page : NULL;
8f942eea
JH
1881}
1882#endif /* CONFIG_ELF_CORE */
1883
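/*
 * Example (illustration only): how a coredump writer consumes
 * get_dump_page().  A NULL return means "emit a hole", not an error --
 * holes keep the core file aligned with its headers while saving disk
 * space.  Hedged sketch; example_dump_range(), emit_page() and
 * emit_hole() are hypothetical.
 */
static int example_dump_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			emit_page(page);	/* write PAGE_SIZE bytes */
			put_page(page);		/* drop the FOLL_GET reference */
		} else {
			emit_hole(PAGE_SIZE);	/* seek forward in the file */
		}
	}
	return 0;
}
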
d1e153fe 1884#ifdef CONFIG_MIGRATION
f68749ec 1885/*
67e139b0 1886 * Returns the number of collected pages. Return value is always >= 0.
f68749ec 1887 */
67e139b0
AP
1888static unsigned long collect_longterm_unpinnable_pages(
1889 struct list_head *movable_page_list,
1890 unsigned long nr_pages,
1891 struct page **pages)
9a4e9f3b 1892{
67e139b0 1893 unsigned long i, collected = 0;
1b7f7e58 1894 struct folio *prev_folio = NULL;
67e139b0 1895 bool drain_allow = true;
9a4e9f3b 1896
83c02c23 1897 for (i = 0; i < nr_pages; i++) {
1b7f7e58 1898 struct folio *folio = page_folio(pages[i]);
f9f38f78 1899
1b7f7e58 1900 if (folio == prev_folio)
83c02c23 1901 continue;
1b7f7e58 1902 prev_folio = folio;
f9f38f78 1903
67e139b0
AP
1904 if (folio_is_longterm_pinnable(folio))
1905 continue;
b05a79d4 1906
67e139b0 1907 collected++;
b05a79d4 1908
67e139b0 1909 if (folio_is_device_coherent(folio))
f9f38f78
CH
1910 continue;
1911
1b7f7e58 1912 if (folio_test_hugetlb(folio)) {
67e139b0 1913 isolate_hugetlb(&folio->page, movable_page_list);
f9f38f78
CH
1914 continue;
1915 }
9a4e9f3b 1916
1b7f7e58 1917 if (!folio_test_lru(folio) && drain_allow) {
f9f38f78
CH
1918 lru_add_drain_all();
1919 drain_allow = false;
1920 }
1921
67e139b0 1922 if (!folio_isolate_lru(folio))
f9f38f78 1923 continue;
67e139b0
AP
1924
1925 list_add_tail(&folio->lru, movable_page_list);
1b7f7e58
MWO
1926 node_stat_mod_folio(folio,
1927 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1928 folio_nr_pages(folio));
9a4e9f3b
AK
1929 }
1930
67e139b0
AP
1931 return collected;
1932}
1933
1934/*
1935 * Unpins all pages and migrates device coherent pages and movable_page_list.
1936 * Returns -EAGAIN if all pages were successfully migrated, or -errno if
1937 * migration failed (or only partially succeeded).
1938 */
1939static int migrate_longterm_unpinnable_pages(
1940 struct list_head *movable_page_list,
1941 unsigned long nr_pages,
1942 struct page **pages)
1943{
1944 int ret;
1945 unsigned long i;
6e7f34eb 1946
b05a79d4 1947 for (i = 0; i < nr_pages; i++) {
67e139b0
AP
1948 struct folio *folio = page_folio(pages[i]);
1949
1950 if (folio_is_device_coherent(folio)) {
1951 /*
1952 * Migration will fail if the page is pinned, so convert
1953 * the pin on the source page to a normal reference.
1954 */
1955 pages[i] = NULL;
1956 folio_get(folio);
1957 gup_put_folio(folio, 1, FOLL_PIN);
1958
1959 if (migrate_device_coherent_page(&folio->page)) {
1960 ret = -EBUSY;
1961 goto err;
1962 }
1963
b05a79d4 1964 continue;
67e139b0 1965 }
b05a79d4 1966
67e139b0
AP
1967 /*
1968 * We can't migrate pages with unexpected references, so drop
1969 * the reference obtained by __get_user_pages_locked().
1970 * Migrating pages have been added to movable_page_list after
1971 * calling folio_isolate_lru(), which takes a reference, so the
1972 * page won't be freed while it's migrating.
1973 */
f6d299ec 1974 unpin_user_page(pages[i]);
67e139b0 1975 pages[i] = NULL;
f68749ec 1976 }
f9f38f78 1977
67e139b0 1978 if (!list_empty(movable_page_list)) {
f9f38f78
CH
1979 struct migration_target_control mtc = {
1980 .nid = NUMA_NO_NODE,
1981 .gfp_mask = GFP_USER | __GFP_NOWARN,
1982 };
1983
67e139b0
AP
1984 if (migrate_pages(movable_page_list, alloc_migration_target,
1985 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1986 MR_LONGTERM_PIN, NULL)) {
f9f38f78 1987 ret = -ENOMEM;
67e139b0
AP
1988 goto err;
1989 }
9a4e9f3b
AK
1990 }
1991
67e139b0
AP
1992 putback_movable_pages(movable_page_list);
1993
1994 return -EAGAIN;
1995
1996err:
1997 for (i = 0; i < nr_pages; i++)
1998 if (pages[i])
1999 unpin_user_page(pages[i]);
2000 putback_movable_pages(movable_page_list);
24a95998 2001
67e139b0
AP
2002 return ret;
2003}
2004
2005/*
2006 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2007 * pages in the range are required to be pinned via FOLL_PIN, before calling
2008 * this routine.
2009 *
2010 * If any pages in the range are not allowed to be pinned, then this routine
2011 * will migrate those pages away, unpin all the pages in the range and return
2012 * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2013 * call this routine again.
2014 *
2015 * If an error other than -EAGAIN occurs, this indicates a migration failure.
2016 * The caller should give up, and propagate the error back up the call stack.
2017 *
2018 * If everything is OK and all pages in the range are allowed to be pinned, then
2019 * this routine leaves all pages pinned and returns zero for success.
2020 */
2021static long check_and_migrate_movable_pages(unsigned long nr_pages,
2022 struct page **pages)
2023{
2024 unsigned long collected;
2025 LIST_HEAD(movable_page_list);
2026
2027 collected = collect_longterm_unpinnable_pages(&movable_page_list,
2028 nr_pages, pages);
2029 if (!collected)
2030 return 0;
2031
2032 return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
2033 pages);
9a4e9f3b
AK
2034}
2035#else
f68749ec 2036static long check_and_migrate_movable_pages(unsigned long nr_pages,
f6d299ec 2037 struct page **pages)
9a4e9f3b 2038{
24a95998 2039 return 0;
9a4e9f3b 2040}
d1e153fe 2041#endif /* CONFIG_MIGRATION */
9a4e9f3b 2042
2bb6d283 2043/*
932f4a63
IW
2044 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2045 * allows us to process the FOLL_LONGTERM flag.
2bb6d283 2046 */
64019a2e 2047static long __gup_longterm_locked(struct mm_struct *mm,
932f4a63
IW
2048 unsigned long start,
2049 unsigned long nr_pages,
2050 struct page **pages,
2051 struct vm_area_struct **vmas,
2052 unsigned int gup_flags)
2bb6d283 2053{
f68749ec 2054 unsigned int flags;
24a95998 2055 long rc, nr_pinned_pages;
2bb6d283 2056
f68749ec
PT
2057 if (!(gup_flags & FOLL_LONGTERM))
2058 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
2059 NULL, gup_flags);
67e139b0
AP
2060
2061 /*
2062 * If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM
2063 * implies FOLL_PIN (although the reverse is not true). Therefore it is
2064 * correct to unconditionally call check_and_migrate_movable_pages()
2065 * which assumes pages have been pinned via FOLL_PIN.
2066 *
2067 * Enforce the above reasoning by asserting that FOLL_PIN is set.
2068 */
f6d299ec
AP
2069 if (WARN_ON(!(gup_flags & FOLL_PIN)))
2070 return -EINVAL;
f68749ec
PT
2071 flags = memalloc_pin_save();
2072 do {
24a95998
AP
2073 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2074 pages, vmas, NULL,
2075 gup_flags);
2076 if (nr_pinned_pages <= 0) {
2077 rc = nr_pinned_pages;
f68749ec 2078 break;
24a95998 2079 }
f6d299ec 2080 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
24a95998 2081 } while (rc == -EAGAIN);
f68749ec 2082 memalloc_pin_restore(flags);
2bb6d283 2083
24a95998 2084 return rc ? rc : nr_pinned_pages;
2bb6d283 2085}
932f4a63 2086
447f3e45
BS
2087static bool is_valid_gup_flags(unsigned int gup_flags)
2088{
2089 /*
2090 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2091 * never directly by the caller, so enforce that with an assertion:
2092 */
2093 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2094 return false;
2095 /*
2096 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
2097 * that is, FOLL_LONGTERM is a specific, more restrictive case of
2098 * FOLL_PIN.
2099 */
2100 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2101 return false;
2102
2103 return true;
2104}
2105
22bf29b6 2106#ifdef CONFIG_MMU
64019a2e 2107static long __get_user_pages_remote(struct mm_struct *mm,
22bf29b6
JH
2108 unsigned long start, unsigned long nr_pages,
2109 unsigned int gup_flags, struct page **pages,
2110 struct vm_area_struct **vmas, int *locked)
2111{
2112 /*
2113 * Parts of FOLL_LONGTERM behavior are incompatible with
2114 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2115 * vmas. However, this only comes up if locked is set, and there are
2116 * callers that do request FOLL_LONGTERM, but do not set locked. So,
2117 * allow what we can.
2118 */
2119 if (gup_flags & FOLL_LONGTERM) {
2120 if (WARN_ON_ONCE(locked))
2121 return -EINVAL;
2122 /*
2123 * This will check the vmas (even if our vmas arg is NULL)
2124 * and return -ENOTSUPP if DAX isn't allowed in this case:
2125 */
64019a2e 2126 return __gup_longterm_locked(mm, start, nr_pages, pages,
22bf29b6
JH
2127 vmas, gup_flags | FOLL_TOUCH |
2128 FOLL_REMOTE);
2129 }
2130
64019a2e 2131 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
22bf29b6
JH
2132 locked,
2133 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
2134}
2135
adc8cb40 2136/**
c4237f8b 2137 * get_user_pages_remote() - pin user pages in memory
c4237f8b
JH
2138 * @mm: mm_struct of target mm
2139 * @start: starting user address
2140 * @nr_pages: number of pages from start to pin
2141 * @gup_flags: flags modifying lookup behaviour
2142 * @pages: array that receives pointers to the pages pinned.
2143 * Should be at least nr_pages long. Or NULL, if caller
2144 * only intends to ensure the pages are faulted in.
2145 * @vmas: array of pointers to vmas corresponding to each page.
2146 * Or NULL if the caller does not require them.
2147 * @locked: pointer to lock flag indicating whether lock is held and
2148 * subsequently whether VM_FAULT_RETRY functionality can be
2149 * utilised. Lock must initially be held.
2150 *
2151 * Returns either the number of pages pinned (which may be less than the
2152 * number requested), or an error. Details about the return value:
2153 *
2154 * -- If nr_pages is 0, returns 0.
2155 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2156 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2157 * pages pinned. Again, this may be less than nr_pages.
2158 *
2159 * The caller is responsible for releasing returned @pages, via put_page().
2160 *
c1e8d7c6 2161 * @vmas are valid only as long as mmap_lock is held.
c4237f8b 2162 *
c1e8d7c6 2163 * Must be called with mmap_lock held for read or write.
c4237f8b 2164 *
adc8cb40
SJ
2165 * get_user_pages_remote walks a process's page tables and takes a reference
2166 * to each struct page that each user address corresponds to at a given
c4237f8b
JH
2167 * instant. That is, it takes the page that would be accessed if a user
2168 * thread accesses the given user virtual address at that instant.
2169 *
2170 * This does not guarantee that the page exists in the user mappings when
adc8cb40 2171 * get_user_pages_remote returns, and there may even be a completely different
c4237f8b
JH
2172 * page there in some cases (e.g. if mmapped pagecache has been invalidated
2173 * and subsequently re-faulted). However, it does guarantee that the page
2174 * won't be freed completely. And mostly callers simply care that the page
2175 * contains data that was valid *at some point in time*. Typically, an IO
2176 * or similar operation cannot guarantee anything stronger anyway because
2177 * locks can't be held over the syscall boundary.
2178 *
2179 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2180 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2181 * be called after the page is finished with, and before put_page is called.
2182 *
adc8cb40
SJ
2183 * get_user_pages_remote is typically used for fewer-copy IO operations,
2184 * to get a handle on the memory by some means other than accesses
2185 * via the user virtual addresses. The pages may be submitted for
2186 * DMA to devices or accessed via their kernel linear mapping (via the
2187 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
c4237f8b
JH
2188 *
2189 * See also get_user_pages_fast, for performance critical applications.
2190 *
adc8cb40 2191 * get_user_pages_remote should be phased out in favor of
c4237f8b 2192 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
adc8cb40 2193 * should use get_user_pages_remote because it cannot pass
c4237f8b
JH
2194 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2195 */
64019a2e 2196long get_user_pages_remote(struct mm_struct *mm,
c4237f8b
JH
2197 unsigned long start, unsigned long nr_pages,
2198 unsigned int gup_flags, struct page **pages,
2199 struct vm_area_struct **vmas, int *locked)
2200{
447f3e45 2201 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2202 return -EINVAL;
2203
64019a2e 2204 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
22bf29b6 2205 pages, vmas, locked);
c4237f8b
JH
2206}
2207EXPORT_SYMBOL(get_user_pages_remote);
2208
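/*
 * Example (illustration only): reading one page of another process's
 * address space, roughly the shape of an access_process_vm()-style
 * caller.  Hedged sketch; example_peek_remote() is hypothetical and
 * error handling is abbreviated.
 */
static int example_peek_remote(struct mm_struct *mm, unsigned long addr,
			       void *buf)
{
	struct page *page;
	int locked = 1;
	long ret;

	mmap_read_lock(mm);		/* the lock must be held, see above */
	ret = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0,
				    &page, NULL, &locked);
	if (ret == 1) {
		void *kaddr = kmap_local_page(page);

		memcpy(buf, kaddr, PAGE_SIZE);
		kunmap_local(kaddr);
		put_page(page);		/* release the reference we took */
	}
	if (locked)			/* GUP may have dropped the lock */
		mmap_read_unlock(mm);
	return ret == 1 ? 0 : -EFAULT;
}
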
eddb1c22 2209#else /* CONFIG_MMU */
64019a2e 2210long get_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
2211 unsigned long start, unsigned long nr_pages,
2212 unsigned int gup_flags, struct page **pages,
2213 struct vm_area_struct **vmas, int *locked)
2214{
2215 return 0;
2216}
3faa52c0 2217
64019a2e 2218static long __get_user_pages_remote(struct mm_struct *mm,
3faa52c0
JH
2219 unsigned long start, unsigned long nr_pages,
2220 unsigned int gup_flags, struct page **pages,
2221 struct vm_area_struct **vmas, int *locked)
2222{
2223 return 0;
2224}
eddb1c22
JH
2225#endif /* !CONFIG_MMU */
2226
adc8cb40
SJ
2227/**
2228 * get_user_pages() - pin user pages in memory
2229 * @start: starting user address
2230 * @nr_pages: number of pages from start to pin
2231 * @gup_flags: flags modifying lookup behaviour
2232 * @pages: array that receives pointers to the pages pinned.
2233 * Should be at least nr_pages long. Or NULL, if caller
2234 * only intends to ensure the pages are faulted in.
2235 * @vmas: array of pointers to vmas corresponding to each page.
2236 * Or NULL if the caller does not require them.
2237 *
64019a2e
PX
2238 * This is the same as get_user_pages_remote(), just with a less-flexible
2239 * calling convention where we assume that the mm being operated on belongs to
2240 * the current task, and doesn't allow passing of a locked parameter. We also
2241 * obviously don't pass FOLL_REMOTE in here.
932f4a63
IW
2242 */
2243long get_user_pages(unsigned long start, unsigned long nr_pages,
2244 unsigned int gup_flags, struct page **pages,
2245 struct vm_area_struct **vmas)
2246{
447f3e45 2247 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2248 return -EINVAL;
2249
64019a2e 2250 return __gup_longterm_locked(current->mm, start, nr_pages,
932f4a63
IW
2251 pages, vmas, gup_flags | FOLL_TOUCH);
2252}
2253EXPORT_SYMBOL(get_user_pages);
2bb6d283 2254
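/*
 * Example (illustration only): the simpler current-task calling
 * convention.  Hedged sketch; example_pin_current() is hypothetical.
 * On success the caller owns one reference per page and must drop it
 * with put_page().
 */
static int example_pin_current(unsigned long addr, struct page **page)
{
	long ret;

	mmap_read_lock(current->mm);
	ret = get_user_pages(addr & PAGE_MASK, 1, FOLL_WRITE, page, NULL);
	mmap_read_unlock(current->mm);

	return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
}
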
acc3c8d1 2255/*
d3649f68 2256 * get_user_pages_unlocked() is suitable to replace the form:
acc3c8d1 2257 *
3e4e28c5 2258 * mmap_read_lock(mm);
64019a2e 2259 * get_user_pages(mm, ..., pages, NULL);
3e4e28c5 2260 * mmap_read_unlock(mm);
d3649f68
CH
2261 *
2262 * with:
2263 *
64019a2e 2264 * get_user_pages_unlocked(mm, ..., pages);
d3649f68
CH
2265 *
2266 * It is functionally equivalent to get_user_pages_fast(), so
2267 * get_user_pages_fast() should be used instead if specific gup_flags
2268 * (e.g. FOLL_FORCE) are not required.
acc3c8d1 2269 */
d3649f68
CH
2270long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2271 struct page **pages, unsigned int gup_flags)
acc3c8d1
KS
2272{
2273 struct mm_struct *mm = current->mm;
d3649f68
CH
2274 int locked = 1;
2275 long ret;
acc3c8d1 2276
d3649f68
CH
2277 /*
2278 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2279 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2280 * vmas. As there are no users of this flag in this call, we simply
2281 * disallow this option for now.
2282 */
2283 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2284 return -EINVAL;
acc3c8d1 2285
d8ed45c5 2286 mmap_read_lock(mm);
64019a2e 2287 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
d3649f68 2288 &locked, gup_flags | FOLL_TOUCH);
acc3c8d1 2289 if (locked)
d8ed45c5 2290 mmap_read_unlock(mm);
d3649f68 2291 return ret;
4bbd4c77 2292}
d3649f68 2293EXPORT_SYMBOL(get_user_pages_unlocked);
2667f50e
SC
2294
2295/*
67a929e0 2296 * Fast GUP
2667f50e
SC
2297 *
2298 * get_user_pages_fast attempts to pin user pages by walking the page
2299 * tables directly and avoids taking locks. Thus the walker needs to be
2300 * protected from page table pages being freed from under it, and should
2301 * block any THP splits.
2302 *
2303 * One way to achieve this is to have the walker disable interrupts, and
2304 * rely on IPIs from the TLB flushing code blocking before the page table
2305 * pages are freed. This is unsuitable for architectures that do not need
2306 * to broadcast an IPI when invalidating TLBs.
2307 *
2308 * Another way to achieve this is to batch up the pages containing page
2309 * tables belonging to more than one mm_user, then rcu_sched a callback to
2310 * free those pages. Disabling interrupts will allow the fast_gup walker to
2311 * block both the rcu_sched callback and an IPI that we broadcast for
2312 * splitting THPs (which is a relatively rare event). The code below adopts this strategy.
2313 *
2314 * Before activating this code, please be aware that the following assumptions
2315 * are currently made:
2316 *
ff2e6d72 2317 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
e585513b 2318 * free pages containing page tables, or TLB flushing requires IPI broadcast.
2667f50e 2319 *
2667f50e
SC
2320 * *) ptes can be read atomically by the architecture.
2321 *
2322 * *) access_ok is sufficient to validate userspace address ranges.
2323 *
2324 * The last two assumptions can be relaxed by the addition of helper functions.
2325 *
2326 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2327 */
67a929e0 2328#ifdef CONFIG_HAVE_FAST_GUP
3faa52c0 2329
790c7369 2330static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
3b78d834 2331 unsigned int flags,
790c7369 2332 struct page **pages)
b59f65fa
KS
2333{
2334 while ((*nr) - nr_start) {
2335 struct page *page = pages[--(*nr)];
2336
2337 ClearPageReferenced(page);
3faa52c0
JH
2338 if (flags & FOLL_PIN)
2339 unpin_user_page(page);
2340 else
2341 put_page(page);
b59f65fa
KS
2342 }
2343}
2344
3010a5ea 2345#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
70cbc3cc
YS
2346/*
2347 * Fast-gup relies on pte change detection to avoid concurrent pgtable
2348 * operations.
2349 *
2350 * To pin the page, fast-gup needs to do the following, in order:
2351 * (1) pin the page (by prefetching the pte), then (2) check that the pte has not changed.
2352 *
2353 * For the rest of the pgtable operations where pgtable updates can race
2354 * with fast-gup, we need to do (1) clear the pte, then (2) check whether
2355 * the page is pinned.
2356 *
2357 * The above works for all pte-level operations, including THP split.
2358 *
2359 * For THP collapse, it's a bit more complicated because fast-gup may be
2360 * walking a pgtable page that is being freed (the pte is still valid but
2361 * the pmd can already be cleared). To avoid racing in that case, we also
2362 * check the pmd here to make sure it doesn't change (this corresponds to
2363 * pmdp_collapse_flush() in the THP collapse code path).
2364 */
2365static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2366 unsigned long end, unsigned int flags,
2367 struct page **pages, int *nr)
2667f50e 2368{
b59f65fa
KS
2369 struct dev_pagemap *pgmap = NULL;
2370 int nr_start = *nr, ret = 0;
2667f50e 2371 pte_t *ptep, *ptem;
2667f50e
SC
2372
2373 ptem = ptep = pte_offset_map(&pmd, addr);
2374 do {
2a4a06da 2375 pte_t pte = ptep_get_lockless(ptep);
b0496fe4
MWO
2376 struct page *page;
2377 struct folio *folio;
2667f50e 2378
0cf45986 2379 if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
e7884f8e
KS
2380 goto pte_unmap;
2381
b798bec4 2382 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
e7884f8e
KS
2383 goto pte_unmap;
2384
b59f65fa 2385 if (pte_devmap(pte)) {
7af75561
IW
2386 if (unlikely(flags & FOLL_LONGTERM))
2387 goto pte_unmap;
2388
b59f65fa
KS
2389 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2390 if (unlikely(!pgmap)) {
3b78d834 2391 undo_dev_pagemap(nr, nr_start, flags, pages);
b59f65fa
KS
2392 goto pte_unmap;
2393 }
2394 } else if (pte_special(pte))
2667f50e
SC
2395 goto pte_unmap;
2396
2397 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2398 page = pte_page(pte);
2399
b0496fe4
MWO
2400 folio = try_grab_folio(page, 1, flags);
2401 if (!folio)
2667f50e
SC
2402 goto pte_unmap;
2403
1507f512 2404 if (unlikely(page_is_secretmem(page))) {
b0496fe4 2405 gup_put_folio(folio, 1, flags);
1507f512
MR
2406 goto pte_unmap;
2407 }
2408
70cbc3cc
YS
2409 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2410 unlikely(pte_val(pte) != pte_val(*ptep))) {
b0496fe4 2411 gup_put_folio(folio, 1, flags);
2667f50e
SC
2412 goto pte_unmap;
2413 }
2414
a7f22660
DH
2415 if (!pte_write(pte) && gup_must_unshare(flags, page)) {
2416 gup_put_folio(folio, 1, flags);
2417 goto pte_unmap;
2418 }
2419
f28d4363
CI
2420 /*
2421 * We need to make the page accessible if and only if we are
2422 * going to access its content (the FOLL_PIN case). Please
2423 * see Documentation/core-api/pin_user_pages.rst for
2424 * details.
2425 */
2426 if (flags & FOLL_PIN) {
2427 ret = arch_make_page_accessible(page);
2428 if (ret) {
b0496fe4 2429 gup_put_folio(folio, 1, flags);
f28d4363
CI
2430 goto pte_unmap;
2431 }
2432 }
b0496fe4 2433 folio_set_referenced(folio);
2667f50e
SC
2434 pages[*nr] = page;
2435 (*nr)++;
2667f50e
SC
2436 } while (ptep++, addr += PAGE_SIZE, addr != end);
2437
2438 ret = 1;
2439
2440pte_unmap:
832d7aa0
CH
2441 if (pgmap)
2442 put_dev_pagemap(pgmap);
2667f50e
SC
2443 pte_unmap(ptem);
2444 return ret;
2445}
2446#else
2447
2448/*
2449 * If we can't determine whether or not a pte is special, then fail immediately
2450 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2451 * to be special.
2452 *
2453 * For a futex to be placed on a THP tail page, get_futex_key requires a
dadbb612 2454 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2667f50e
SC
2455 * useful to have gup_huge_pmd even if we can't operate on ptes.
2456 */
70cbc3cc
YS
2457static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2458 unsigned long end, unsigned int flags,
2459 struct page **pages, int *nr)
2667f50e
SC
2460{
2461 return 0;
2462}
3010a5ea 2463#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2667f50e 2464
17596731 2465#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
b59f65fa 2466static int __gup_device_huge(unsigned long pfn, unsigned long addr,
86dfbed4
JH
2467 unsigned long end, unsigned int flags,
2468 struct page **pages, int *nr)
b59f65fa
KS
2469{
2470 int nr_start = *nr;
2471 struct dev_pagemap *pgmap = NULL;
2472
2473 do {
2474 struct page *page = pfn_to_page(pfn);
2475
2476 pgmap = get_dev_pagemap(pfn, pgmap);
2477 if (unlikely(!pgmap)) {
3b78d834 2478 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb 2479 break;
b59f65fa
KS
2480 }
2481 SetPageReferenced(page);
2482 pages[*nr] = page;
3faa52c0
JH
2483 if (unlikely(!try_grab_page(page, flags))) {
2484 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb 2485 break;
3faa52c0 2486 }
b59f65fa
KS
2487 (*nr)++;
2488 pfn++;
2489 } while (addr += PAGE_SIZE, addr != end);
832d7aa0 2490
6401c4eb 2491 put_dev_pagemap(pgmap);
20b7fee7 2492 return addr == end;
b59f65fa
KS
2493}
2494
a9b6de77 2495static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2496 unsigned long end, unsigned int flags,
2497 struct page **pages, int *nr)
b59f65fa
KS
2498{
2499 unsigned long fault_pfn;
a9b6de77
DW
2500 int nr_start = *nr;
2501
2502 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
86dfbed4 2503 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2504 return 0;
b59f65fa 2505
a9b6de77 2506 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2507 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2508 return 0;
2509 }
2510 return 1;
b59f65fa
KS
2511}
2512
a9b6de77 2513static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2514 unsigned long end, unsigned int flags,
2515 struct page **pages, int *nr)
b59f65fa
KS
2516{
2517 unsigned long fault_pfn;
a9b6de77
DW
2518 int nr_start = *nr;
2519
2520 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
86dfbed4 2521 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2522 return 0;
b59f65fa 2523
a9b6de77 2524 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2525 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2526 return 0;
2527 }
2528 return 1;
b59f65fa
KS
2529}
2530#else
a9b6de77 2531static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2532 unsigned long end, unsigned int flags,
2533 struct page **pages, int *nr)
b59f65fa
KS
2534{
2535 BUILD_BUG();
2536 return 0;
2537}
2538
a9b6de77 2539static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2540 unsigned long end, unsigned int flags,
2541 struct page **pages, int *nr)
b59f65fa
KS
2542{
2543 BUILD_BUG();
2544 return 0;
2545}
2546#endif
2547
a43e9820
JH
2548static int record_subpages(struct page *page, unsigned long addr,
2549 unsigned long end, struct page **pages)
2550{
2551 int nr;
2552
c228afb1
MWO
2553 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2554 pages[nr] = nth_page(page, nr);
a43e9820
JH
2555
2556 return nr;
2557}
2558
cbd34da7
CH
2559#ifdef CONFIG_ARCH_HAS_HUGEPD
2560static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2561 unsigned long sz)
2562{
2563 unsigned long __boundary = (addr + sz) & ~(sz-1);
2564 return (__boundary - 1 < end - 1) ? __boundary : end;
2565}
2566
2567static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
0cd22afd
JH
2568 unsigned long end, unsigned int flags,
2569 struct page **pages, int *nr)
cbd34da7
CH
2570{
2571 unsigned long pte_end;
09a1626e
MWO
2572 struct page *page;
2573 struct folio *folio;
cbd34da7
CH
2574 pte_t pte;
2575 int refs;
2576
2577 pte_end = (addr + sz) & ~(sz-1);
2578 if (pte_end < end)
2579 end = pte_end;
2580
55ca2263 2581 pte = huge_ptep_get(ptep);
cbd34da7 2582
0cd22afd 2583 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
cbd34da7
CH
2584 return 0;
2585
2586 /* hugepages are never "special" */
2587 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2588
09a1626e 2589 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
a43e9820 2590 refs = record_subpages(page, addr, end, pages + *nr);
cbd34da7 2591
09a1626e
MWO
2592 folio = try_grab_folio(page, refs, flags);
2593 if (!folio)
cbd34da7 2594 return 0;
cbd34da7
CH
2595
2596 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
09a1626e 2597 gup_put_folio(folio, refs, flags);
cbd34da7
CH
2598 return 0;
2599 }
2600
a7f22660
DH
2601 if (!pte_write(pte) && gup_must_unshare(flags, &folio->page)) {
2602 gup_put_folio(folio, refs, flags);
2603 return 0;
2604 }
2605
a43e9820 2606 *nr += refs;
09a1626e 2607 folio_set_referenced(folio);
cbd34da7
CH
2608 return 1;
2609}
2610
2611static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2612 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2613 struct page **pages, int *nr)
2614{
2615 pte_t *ptep;
2616 unsigned long sz = 1UL << hugepd_shift(hugepd);
2617 unsigned long next;
2618
2619 ptep = hugepte_offset(hugepd, addr, pdshift);
2620 do {
2621 next = hugepte_addr_end(addr, end, sz);
0cd22afd 2622 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
cbd34da7
CH
2623 return 0;
2624 } while (ptep++, addr = next, addr != end);
2625
2626 return 1;
2627}
2628#else
2629static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2630 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2631 struct page **pages, int *nr)
2632{
2633 return 0;
2634}
2635#endif /* CONFIG_ARCH_HAS_HUGEPD */
2636
2667f50e 2637static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
0cd22afd
JH
2638 unsigned long end, unsigned int flags,
2639 struct page **pages, int *nr)
2667f50e 2640{
667ed1f7
MWO
2641 struct page *page;
2642 struct folio *folio;
2667f50e
SC
2643 int refs;
2644
b798bec4 2645 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2646 return 0;
2647
7af75561
IW
2648 if (pmd_devmap(orig)) {
2649 if (unlikely(flags & FOLL_LONGTERM))
2650 return 0;
86dfbed4
JH
2651 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2652 pages, nr);
7af75561 2653 }
b59f65fa 2654
c228afb1 2655 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
a43e9820 2656 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2657
667ed1f7
MWO
2658 folio = try_grab_folio(page, refs, flags);
2659 if (!folio)
2667f50e 2660 return 0;
2667f50e
SC
2661
2662 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
667ed1f7 2663 gup_put_folio(folio, refs, flags);
2667f50e
SC
2664 return 0;
2665 }
2666
a7f22660
DH
2667 if (!pmd_write(orig) && gup_must_unshare(flags, &folio->page)) {
2668 gup_put_folio(folio, refs, flags);
2669 return 0;
2670 }
2671
a43e9820 2672 *nr += refs;
667ed1f7 2673 folio_set_referenced(folio);
2667f50e
SC
2674 return 1;
2675}
2676
2677static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2678 unsigned long end, unsigned int flags,
2679 struct page **pages, int *nr)
2667f50e 2680{
83afb52e
MWO
2681 struct page *page;
2682 struct folio *folio;
2667f50e
SC
2683 int refs;
2684
b798bec4 2685 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2686 return 0;
2687
7af75561
IW
2688 if (pud_devmap(orig)) {
2689 if (unlikely(flags & FOLL_LONGTERM))
2690 return 0;
86dfbed4
JH
2691 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2692 pages, nr);
7af75561 2693 }
b59f65fa 2694
c228afb1 2695 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
a43e9820 2696 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2697
83afb52e
MWO
2698 folio = try_grab_folio(page, refs, flags);
2699 if (!folio)
2667f50e 2700 return 0;
2667f50e
SC
2701
2702 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
83afb52e 2703 gup_put_folio(folio, refs, flags);
2667f50e
SC
2704 return 0;
2705 }
2706
a7f22660
DH
2707 if (!pud_write(orig) && gup_must_unshare(flags, &folio->page)) {
2708 gup_put_folio(folio, refs, flags);
2709 return 0;
2710 }
2711
a43e9820 2712 *nr += refs;
83afb52e 2713 folio_set_referenced(folio);
2667f50e
SC
2714 return 1;
2715}
2716
f30c59e9 2717static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
b798bec4 2718 unsigned long end, unsigned int flags,
f30c59e9
AK
2719 struct page **pages, int *nr)
2720{
2721 int refs;
2d7919a2
MWO
2722 struct page *page;
2723 struct folio *folio;
f30c59e9 2724
b798bec4 2725 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
f30c59e9
AK
2726 return 0;
2727
b59f65fa 2728 BUILD_BUG_ON(pgd_devmap(orig));
a43e9820 2729
c228afb1 2730 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
a43e9820 2731 refs = record_subpages(page, addr, end, pages + *nr);
f30c59e9 2732
2d7919a2
MWO
2733 folio = try_grab_folio(page, refs, flags);
2734 if (!folio)
f30c59e9 2735 return 0;
f30c59e9
AK
2736
2737 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2d7919a2 2738 gup_put_folio(folio, refs, flags);
f30c59e9
AK
2739 return 0;
2740 }
2741
a43e9820 2742 *nr += refs;
2d7919a2 2743 folio_set_referenced(folio);
f30c59e9
AK
2744 return 1;
2745}
2746
d3f7b1bb 2747static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
b798bec4 2748 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2749{
2750 unsigned long next;
2751 pmd_t *pmdp;
2752
d3f7b1bb 2753 pmdp = pmd_offset_lockless(pudp, pud, addr);
2667f50e 2754 do {
38c5ce93 2755 pmd_t pmd = READ_ONCE(*pmdp);
2667f50e
SC
2756
2757 next = pmd_addr_end(addr, end);
84c3fc4e 2758 if (!pmd_present(pmd))
2667f50e
SC
2759 return 0;
2760
414fd080
YZ
2761 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2762 pmd_devmap(pmd))) {
0cf45986
DH
2763 if (pmd_protnone(pmd) &&
2764 !gup_can_follow_protnone(flags))
2667f50e
SC
2765 return 0;
2766
b798bec4 2767 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2667f50e
SC
2768 pages, nr))
2769 return 0;
2770
f30c59e9
AK
2771 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2772 /*
2773 * architectures can have different formats for the
2774 * hugetlbfs pmd and the THP pmd
2775 */
2776 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
b798bec4 2777 PMD_SHIFT, next, flags, pages, nr))
f30c59e9 2778 return 0;
70cbc3cc 2779 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
2923117b 2780 return 0;
2667f50e
SC
2781 } while (pmdp++, addr = next, addr != end);
2782
2783 return 1;
2784}
2785
d3f7b1bb 2786static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
b798bec4 2787 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2788{
2789 unsigned long next;
2790 pud_t *pudp;
2791
d3f7b1bb 2792 pudp = pud_offset_lockless(p4dp, p4d, addr);
2667f50e 2793 do {
e37c6982 2794 pud_t pud = READ_ONCE(*pudp);
2667f50e
SC
2795
2796 next = pud_addr_end(addr, end);
15494520 2797 if (unlikely(!pud_present(pud)))
2667f50e 2798 return 0;
f30c59e9 2799 if (unlikely(pud_huge(pud))) {
b798bec4 2800 if (!gup_huge_pud(pud, pudp, addr, next, flags,
f30c59e9
AK
2801 pages, nr))
2802 return 0;
2803 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2804 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
b798bec4 2805 PUD_SHIFT, next, flags, pages, nr))
2667f50e 2806 return 0;
d3f7b1bb 2807 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2667f50e
SC
2808 return 0;
2809 } while (pudp++, addr = next, addr != end);
2810
2811 return 1;
2812}
2813
d3f7b1bb 2814static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
b798bec4 2815 unsigned int flags, struct page **pages, int *nr)
c2febafc
KS
2816{
2817 unsigned long next;
2818 p4d_t *p4dp;
2819
d3f7b1bb 2820 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
c2febafc
KS
2821 do {
2822 p4d_t p4d = READ_ONCE(*p4dp);
2823
2824 next = p4d_addr_end(addr, end);
2825 if (p4d_none(p4d))
2826 return 0;
2827 BUILD_BUG_ON(p4d_huge(p4d));
2828 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2829 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
b798bec4 2830 P4D_SHIFT, next, flags, pages, nr))
c2febafc 2831 return 0;
d3f7b1bb 2832 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
c2febafc
KS
2833 return 0;
2834 } while (p4dp++, addr = next, addr != end);
2835
2836 return 1;
2837}
2838
5b65c467 2839static void gup_pgd_range(unsigned long addr, unsigned long end,
b798bec4 2840 unsigned int flags, struct page **pages, int *nr)
5b65c467
KS
2841{
2842 unsigned long next;
2843 pgd_t *pgdp;
2844
2845 pgdp = pgd_offset(current->mm, addr);
2846 do {
2847 pgd_t pgd = READ_ONCE(*pgdp);
2848
2849 next = pgd_addr_end(addr, end);
2850 if (pgd_none(pgd))
2851 return;
2852 if (unlikely(pgd_huge(pgd))) {
b798bec4 2853 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
5b65c467
KS
2854 pages, nr))
2855 return;
2856 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2857 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
b798bec4 2858 PGDIR_SHIFT, next, flags, pages, nr))
5b65c467 2859 return;
d3f7b1bb 2860 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
5b65c467
KS
2861 return;
2862 } while (pgdp++, addr = next, addr != end);
2863}
050a9adc
CH
2864#else
2865static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2866 unsigned int flags, struct page **pages, int *nr)
2867{
2868}
2869#endif /* CONFIG_HAVE_FAST_GUP */
5b65c467
KS
2870
2871#ifndef gup_fast_permitted
2872/*
dadbb612 2873 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
5b65c467
KS
2874 * we need to fall back to the slow version:
2875 */
26f4c328 2876static bool gup_fast_permitted(unsigned long start, unsigned long end)
5b65c467 2877{
26f4c328 2878 return true;
5b65c467
KS
2879}
2880#endif
2881
7af75561
IW
2882static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2883 unsigned int gup_flags, struct page **pages)
2884{
2885 int ret;
2886
2887 /*
2888 * FIXME: FOLL_LONGTERM does not work with
2889 * get_user_pages_unlocked() (see comments in that function)
2890 */
2891 if (gup_flags & FOLL_LONGTERM) {
d8ed45c5 2892 mmap_read_lock(current->mm);
64019a2e 2893 ret = __gup_longterm_locked(current->mm,
7af75561
IW
2894 start, nr_pages,
2895 pages, NULL, gup_flags);
d8ed45c5 2896 mmap_read_unlock(current->mm);
7af75561
IW
2897 } else {
2898 ret = get_user_pages_unlocked(start, nr_pages,
2899 pages, gup_flags);
2900 }
2901
2902 return ret;
2903}
2904
c28b1fc7
JG
2905static unsigned long lockless_pages_from_mm(unsigned long start,
2906 unsigned long end,
2907 unsigned int gup_flags,
2908 struct page **pages)
2909{
2910 unsigned long flags;
2911 int nr_pinned = 0;
57efa1fe 2912 unsigned seq;
c28b1fc7
JG
2913
2914 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2915 !gup_fast_permitted(start, end))
2916 return 0;
2917
57efa1fe
JG
2918 if (gup_flags & FOLL_PIN) {
2919 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2920 if (seq & 1)
2921 return 0;
2922 }
2923
c28b1fc7
JG
2924 /*
2925 * Disable interrupts. The nested form is used, in order to allow full,
2926 * general purpose use of this routine.
2927 *
2928 * With interrupts disabled, we block page table pages from being freed
2929 * from under us. See struct mmu_table_batch comments in
2930 * include/asm-generic/tlb.h for more details.
2931 *
2932 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2933 * that come from THPs splitting.
2934 */
2935 local_irq_save(flags);
2936 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2937 local_irq_restore(flags);
57efa1fe
JG
2938
2939 /*
2940 * When pinning pages for DMA there could be a concurrent write protect
2941 * from fork() via copy_page_range(); in this case, always fail fast GUP.
2942 */
2943 if (gup_flags & FOLL_PIN) {
2944 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
b6a2619c 2945 unpin_user_pages_lockless(pages, nr_pinned);
57efa1fe 2946 return 0;
b6a2619c
DH
2947 } else {
2948 sanity_check_pinned_pages(pages, nr_pinned);
57efa1fe
JG
2949 }
2950 }
c28b1fc7
JG
2951 return nr_pinned;
2952}
2953
2954static int internal_get_user_pages_fast(unsigned long start,
2955 unsigned long nr_pages,
eddb1c22
JH
2956 unsigned int gup_flags,
2957 struct page **pages)
2667f50e 2958{
c28b1fc7
JG
2959 unsigned long len, end;
2960 unsigned long nr_pinned;
2961 int ret;
2667f50e 2962
f4000fdf 2963 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
376a34ef 2964 FOLL_FORCE | FOLL_PIN | FOLL_GET |
55b8fe70 2965 FOLL_FAST_ONLY | FOLL_NOFAULT)))
817be129
CH
2966 return -EINVAL;
2967
a458b76a
AA
2968 if (gup_flags & FOLL_PIN)
2969 mm_set_has_pinned_flag(&current->mm->flags);
008cfe44 2970
f81cd178 2971 if (!(gup_flags & FOLL_FAST_ONLY))
da1c55f1 2972 might_lock_read(&current->mm->mmap_lock);
f81cd178 2973
f455c854 2974 start = untagged_addr(start) & PAGE_MASK;
c28b1fc7
JG
2975 len = nr_pages << PAGE_SHIFT;
2976 if (check_add_overflow(start, len, &end))
c61611f7 2977 return 0;
96d4f267 2978 if (unlikely(!access_ok((void __user *)start, len)))
c61611f7 2979 return -EFAULT;
73e10a61 2980
c28b1fc7
JG
2981 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2982 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2983 return nr_pinned;
2667f50e 2984
c28b1fc7
JG
2985 /* Slow path: try to get the remaining pages with get_user_pages */
2986 start += nr_pinned << PAGE_SHIFT;
2987 pages += nr_pinned;
2988 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2989 pages);
2990 if (ret < 0) {
2991 /*
2992 * The caller has to unpin the pages we already pinned so
2993 * returning -errno is not an option
2994 */
2995 if (nr_pinned)
2996 return nr_pinned;
2997 return ret;
2667f50e 2998 }
c28b1fc7 2999 return ret + nr_pinned;
2667f50e 3000}
c28b1fc7 3001
dadbb612
SJ
3002/**
3003 * get_user_pages_fast_only() - pin user pages in memory
3004 * @start: starting user address
3005 * @nr_pages: number of pages from start to pin
3006 * @gup_flags: flags modifying pin behaviour
3007 * @pages: array that receives pointers to the pages pinned.
3008 * Should be at least nr_pages long.
3009 *
9e1f0580
JH
3010 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3011 * the regular GUP.
3012 * Note one difference from get_user_pages_fast(): this always returns the
3013 * number of pages pinned, or 0 if no pages were pinned.
3014 *
3015 * If the architecture does not support this function, simply return with no
3016 * pages pinned.
3017 *
3018 * Careful, careful! COW breaking can go either way, so a non-write
3019 * access can get ambiguous page results. If you call this function without
3020 * 'write' set, you'd better be sure that you're ok with that ambiguity.
3021 */
dadbb612
SJ
3022int get_user_pages_fast_only(unsigned long start, int nr_pages,
3023 unsigned int gup_flags, struct page **pages)
9e1f0580 3024{
376a34ef 3025 int nr_pinned;
9e1f0580
JH
3026 /*
3027 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3028 * because gup fast is always a "pin with a +1 page refcount" request.
376a34ef
JH
3029 *
3030 * FOLL_FAST_ONLY is required in order to match the API description of
3031 * this routine: no fall back to regular ("slow") GUP.
9e1f0580 3032 */
dadbb612 3033 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
9e1f0580 3034
376a34ef
JH
3035 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3036 pages);
9e1f0580
JH
3037
3038 /*
376a34ef
JH
3039 * As specified in the API description above, this routine is not
3040 * allowed to return negative values. However, the common core
3041 * routine internal_get_user_pages_fast() *can* return -errno.
3042 * Therefore, correct for that here:
9e1f0580 3043 */
376a34ef
JH
3044 if (nr_pinned < 0)
3045 nr_pinned = 0;
9e1f0580
JH
3046
3047 return nr_pinned;
3048}
dadbb612 3049EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
9e1f0580 3050
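/*
 * Example (illustration only): using the _fast_only variant from a
 * context that must not sleep or take mmap_lock.  On failure it reports
 * "no pages" instead of blocking, so the caller can punt to a sleepable
 * slow path.  Hedged sketch; example_try_pin_atomic() is hypothetical;
 * on success the caller owns one page reference (put_page() to drop).
 */
static bool example_try_pin_atomic(unsigned long uaddr, struct page **page)
{
	return get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, page) == 1;
}
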
eddb1c22
JH
3051/**
3052 * get_user_pages_fast() - pin user pages in memory
3faa52c0
JH
3053 * @start: starting user address
3054 * @nr_pages: number of pages from start to pin
3055 * @gup_flags: flags modifying pin behaviour
3056 * @pages: array that receives pointers to the pages pinned.
3057 * Should be at least nr_pages long.
eddb1c22 3058 *
c1e8d7c6 3059 * Attempt to pin user pages in memory without taking mm->mmap_lock.
eddb1c22
JH
3060 * If not successful, it will fall back to taking the lock and
3061 * calling get_user_pages().
3062 *
3063 * Returns number of pages pinned. This may be fewer than the number requested.
3064 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3065 * -errno.
3066 */
3067int get_user_pages_fast(unsigned long start, int nr_pages,
3068 unsigned int gup_flags, struct page **pages)
3069{
447f3e45 3070 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
3071 return -EINVAL;
3072
94202f12
JH
3073 /*
3074 * The caller may or may not have explicitly set FOLL_GET; either way is
3075 * OK. However, internally (within mm/gup.c), gup fast variants must set
3076 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3077 * request.
3078 */
3079 gup_flags |= FOLL_GET;
eddb1c22
JH
3080 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
3081}
050a9adc 3082EXPORT_SYMBOL_GPL(get_user_pages_fast);
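/*
 * Example (illustration only): the classic short-lived FOLL_GET pattern,
 * e.g. for direct I/O.  Hedged sketch; example_pin_for_io() is
 * hypothetical and assumes @pages has room for @nr entries.
 */
static int example_pin_for_io(unsigned long uaddr, int nr, struct page **pages)
{
	int got = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

	if (got < 0)
		return got;		/* no pages were pinned */

	/* ... perform the I/O against pages[0..got-1] ... */

	while (got)
		put_page(pages[--got]);	/* FOLL_GET pairs with put_page() */
	return 0;
}
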
eddb1c22
JH
3083
3084/**
3085 * pin_user_pages_fast() - pin user pages in memory without taking locks
3086 *
3faa52c0
JH
3087 * @start: starting user address
3088 * @nr_pages: number of pages from start to pin
3089 * @gup_flags: flags modifying pin behaviour
3090 * @pages: array that receives pointers to the pages pinned.
3091 * Should be at least nr_pages long.
3092 *
3093 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3094 * get_user_pages_fast() for documentation on the function arguments, because
3095 * the arguments here are identical.
3096 *
3097 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3098 * see Documentation/core-api/pin_user_pages.rst for further details.
eddb1c22
JH
3099 */
3100int pin_user_pages_fast(unsigned long start, int nr_pages,
3101 unsigned int gup_flags, struct page **pages)
3102{
3faa52c0
JH
3103 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3104 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3105 return -EINVAL;
3106
0768c8de
YN
3107 if (WARN_ON_ONCE(!pages))
3108 return -EINVAL;
3109
3faa52c0
JH
3110 gup_flags |= FOLL_PIN;
3111 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
eddb1c22
JH
3112}
3113EXPORT_SYMBOL_GPL(pin_user_pages_fast);
3114
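/*
 * Example (illustration only): the FOLL_PIN counterpart of the sketch
 * after get_user_pages_fast(), for pages whose contents a device will
 * access.  Pins must be dropped with unpin_user_page()/
 * unpin_user_pages(), never put_page().  Hedged sketch;
 * example_pin_for_dma() is hypothetical.
 */
static int example_pin_for_dma(unsigned long uaddr, int nr, struct page **pages)
{
	int got = pin_user_pages_fast(uaddr, nr,
				      FOLL_WRITE | FOLL_LONGTERM, pages);

	if (got < 0)
		return got;

	/* ... set up DMA mappings and run the transfer ... */

	unpin_user_pages(pages, got);	/* FOLL_PIN pairs with unpin */
	return 0;
}
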
104acc32 3115/*
dadbb612
SJ
3116 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
3117 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
104acc32
JH
3118 *
3119 * The API rules are the same, too: no negative values may be returned.
3120 */
3121int pin_user_pages_fast_only(unsigned long start, int nr_pages,
3122 unsigned int gup_flags, struct page **pages)
3123{
3124 int nr_pinned;
3125
3126 /*
3127 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
3128 * rules require returning 0, rather than -errno:
3129 */
3130 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3131 return 0;
0768c8de
YN
3132
3133 if (WARN_ON_ONCE(!pages))
3134 return 0;
104acc32
JH
3135 /*
3136 * FOLL_FAST_ONLY is required in order to match the API description of
3137 * this routine: no fall back to regular ("slow") GUP.
3138 */
3139 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
3140 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3141 pages);
3142 /*
3143 * This routine is not allowed to return negative values. However,
3144 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3145 * correct for that here:
3146 */
3147 if (nr_pinned < 0)
3148 nr_pinned = 0;
3149
3150 return nr_pinned;
3151}
3152EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
3153
eddb1c22 3154/**
64019a2e 3155 * pin_user_pages_remote() - pin pages of a remote process
eddb1c22 3156 *
3faa52c0
JH
3157 * @mm: mm_struct of target mm
3158 * @start: starting user address
3159 * @nr_pages: number of pages from start to pin
3160 * @gup_flags: flags modifying lookup behaviour
3161 * @pages: array that receives pointers to the pages pinned.
0768c8de 3162 * Should be at least nr_pages long.
3faa52c0
JH
3163 * @vmas: array of pointers to vmas corresponding to each page.
3164 * Or NULL if the caller does not require them.
3165 * @locked: pointer to lock flag indicating whether lock is held and
3166 * subsequently whether VM_FAULT_RETRY functionality can be
3167 * utilised. Lock must initially be held.
3168 *
3169 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3170 * get_user_pages_remote() for documentation on the function arguments, because
3171 * the arguments here are identical.
3172 *
3173 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3174 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22 3175 */
64019a2e 3176long pin_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
3177 unsigned long start, unsigned long nr_pages,
3178 unsigned int gup_flags, struct page **pages,
3179 struct vm_area_struct **vmas, int *locked)
3180{
3faa52c0
JH
3181 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3182 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3183 return -EINVAL;
3184
0768c8de
YN
3185 if (WARN_ON_ONCE(!pages))
3186 return -EINVAL;
3187
3faa52c0 3188 gup_flags |= FOLL_PIN;
64019a2e 3189 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3faa52c0 3190 pages, vmas, locked);
eddb1c22
JH
3191}
3192EXPORT_SYMBOL(pin_user_pages_remote);
3193
3194/**
3195 * pin_user_pages() - pin user pages in memory for use by other devices
3196 *
3faa52c0
JH
3197 * @start: starting user address
3198 * @nr_pages: number of pages from start to pin
3199 * @gup_flags: flags modifying lookup behaviour
3200 * @pages: array that receives pointers to the pages pinned.
0768c8de 3201 * Should be at least nr_pages long.
3faa52c0
JH
3202 * @vmas: array of pointers to vmas corresponding to each page.
3203 * Or NULL if the caller does not require them.
3204 *
3205 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3206 * FOLL_PIN is set.
3207 *
3208 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3209 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22
JH
3210 */
3211long pin_user_pages(unsigned long start, unsigned long nr_pages,
3212 unsigned int gup_flags, struct page **pages,
3213 struct vm_area_struct **vmas)
3214{
3faa52c0
JH
3215 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3216 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3217 return -EINVAL;
3218
0768c8de
YN
3219 if (WARN_ON_ONCE(!pages))
3220 return -EINVAL;
3221
3faa52c0 3222 gup_flags |= FOLL_PIN;
64019a2e 3223 return __gup_longterm_locked(current->mm, start, nr_pages,
3faa52c0 3224 pages, vmas, gup_flags);
eddb1c22
JH
3225}
3226EXPORT_SYMBOL(pin_user_pages);
91429023
JH
3227
3228/*
3229 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3230 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3231 * FOLL_PIN and rejects FOLL_GET.
3232 */
3233long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3234 struct page **pages, unsigned int gup_flags)
3235{
3236 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3237 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3238 return -EINVAL;
3239
0768c8de
YN
3240 if (WARN_ON_ONCE(!pages))
3241 return -EINVAL;
3242
91429023
JH
3243 gup_flags |= FOLL_PIN;
3244 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3245}
3246EXPORT_SYMBOL(pin_user_pages_unlocked);