mm/gup.c
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
2#include <linux/kernel.h>
3#include <linux/errno.h>
4#include <linux/err.h>
5#include <linux/spinlock.h>
6
4bbd4c77 7#include <linux/mm.h>
3565fce3 8#include <linux/memremap.h>
9#include <linux/pagemap.h>
10#include <linux/rmap.h>
11#include <linux/swap.h>
12#include <linux/swapops.h>
1507f512 13#include <linux/secretmem.h>
4bbd4c77 14
174cd4b1 15#include <linux/sched/signal.h>
2667f50e 16#include <linux/rwsem.h>
f30c59e9 17#include <linux/hugetlb.h>
18#include <linux/migrate.h>
19#include <linux/mm_inline.h>
20#include <linux/sched/mm.h>
1027e443 21
33a709b2 22#include <asm/mmu_context.h>
1027e443 23#include <asm/tlbflush.h>
2667f50e 24
25#include "internal.h"
26
27struct follow_page_context {
28 struct dev_pagemap *pgmap;
29 unsigned int page_mask;
30};
31
32static inline void sanity_check_pinned_pages(struct page **pages,
33 unsigned long npages)
34{
35 if (!IS_ENABLED(CONFIG_DEBUG_VM))
36 return;
37
38 /*
39 * We only pin anonymous pages if they are exclusive. Once pinned, they
40 * can no longer become possibly-shared, and PageAnonExclusive() will
41 * stay set until the page is freed.
42 *
43 * We'd like to verify that our pinned anonymous pages are still mapped
44 * exclusively. The issue with anon THP is that we don't know how
45 * they are/were mapped when pinning them. However, for anon
46 * THP we can assume that either the given page (PTE-mapped THP) or
47 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
48 * neither is the case, there is certainly something wrong.
49 */
50 for (; npages; npages--, pages++) {
51 struct page *page = *pages;
52 struct folio *folio = page_folio(page);
53
54 if (!folio_test_anon(folio))
55 continue;
56 if (!folio_test_large(folio) || folio_test_hugetlb(folio))
57 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
58 else
59 /* Either a PTE-mapped or a PMD-mapped THP. */
60 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
61 !PageAnonExclusive(page), page);
62 }
63}
64
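/*
 * Editor's illustrative note (not part of the original file): for a
 * PMD-mapped THP the PageAnonExclusive() bit is tracked on the head page,
 * while a PTE-mapped THP tracks it per subpage, which is why the check
 * above accepts either of:
 *
 *	PageAnonExclusive(&folio->page)		// the head page
 *	PageAnonExclusive(page)			// the given subpage
 */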
cd1adf1b 65/*
ece1ed7b 66 * Return the folio with ref appropriately incremented,
cd1adf1b 67 * or NULL if that failed.
a707cdd5 68 */
ece1ed7b 69static inline struct folio *try_get_folio(struct page *page, int refs)
a707cdd5 70{
ece1ed7b 71 struct folio *folio;
a707cdd5 72
59409373 73retry:
74 folio = page_folio(page);
75 if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
a707cdd5 76 return NULL;
ece1ed7b 77 if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
a707cdd5 78 return NULL;
79
80 /*
81 * At this point we have a stable reference to the folio; but it
82 * could be that between calling page_folio() and the refcount
83 * increment, the folio was split, in which case we'd end up
84 * holding a reference on a folio that has nothing to do with the page
c24d3732 85 * we were given anymore.
86 * So now that the folio is stable, recheck that the page still
87 * belongs to this folio.
c24d3732 88 */
89 if (unlikely(page_folio(page) != folio)) {
90 folio_put_refs(folio, refs);
59409373 91 goto retry;
92 }
93
ece1ed7b 94 return folio;
95}
96
3967db22 97/**
ece1ed7b 98 * try_grab_folio() - Attempt to get or pin a folio.
3967db22 99 * @page: pointer to page to be grabbed
ece1ed7b 100 * @refs: the value to (effectively) add to the folio's refcount
101 * @flags: gup flags: these are the FOLL_* flag values.
102 *
3faa52c0 103 * "grab" names in this file mean, "look at flags to decide whether to use
ece1ed7b 104 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
105 *
106 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
107 * same time. (That's true throughout the get_user_pages*() and
108 * pin_user_pages*() APIs.) Cases:
109 *
ece1ed7b 110 * FOLL_GET: folio's refcount will be incremented by @refs.
3967db22 111 *
112 * FOLL_PIN on large folios: folio's refcount will be incremented by
113 * @refs, and its compound_pincount will be incremented by @refs.
3967db22 114 *
ece1ed7b 115 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
5232c63f 116 * @refs * GUP_PIN_COUNTING_BIAS.
3faa52c0 117 *
118 * Return: The folio containing @page (with refcount appropriately
119 * incremented) for success, or NULL upon failure. If neither FOLL_GET
120 * nor FOLL_PIN was set, that's considered failure, and furthermore,
121 * a likely bug in the caller, so a warning is also emitted.
3faa52c0 122 */
ece1ed7b 123struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
124{
125 if (flags & FOLL_GET)
ece1ed7b 126 return try_get_folio(page, refs);
3faa52c0 127 else if (flags & FOLL_PIN) {
128 struct folio *folio;
129
df3a0a21 130 /*
131 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
132 * right zone, so fail and let the caller fall back to the slow
133 * path.
df3a0a21 134 */
135 if (unlikely((flags & FOLL_LONGTERM) &&
136 !is_pinnable_page(page)))
137 return NULL;
138
139 /*
140 * CAUTION: Don't use compound_head() on the page before this
141 * point, the result won't be stable.
142 */
143 folio = try_get_folio(page, refs);
144 if (!folio)
145 return NULL;
146
47e29d32 147 /*
ece1ed7b 148 * When pinning a large folio, use an exact count to track it.
47e29d32 149 *
150 * However, be sure to *also* increment the normal folio
151 * refcount field at least once, so that the folio really
78d9d6ce 152 * is pinned. That's why the refcount from the earlier
ece1ed7b 153 * try_get_folio() is left intact.
47e29d32 154 */
155 if (folio_test_large(folio))
156 atomic_add(refs, folio_pincount_ptr(folio));
c24d3732 157 else
158 folio_ref_add(folio,
159 refs * (GUP_PIN_COUNTING_BIAS - 1));
160 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
47e29d32 161
ece1ed7b 162 return folio;
163 }
164
165 WARN_ON_ONCE(1);
166 return NULL;
167}
168
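/*
 * Editor's illustrative sketch (not part of the original file): assuming
 * the usual GUP_PIN_COUNTING_BIAS of 1024, the refcount effect described
 * above works out roughly as follows for a small (single-page) folio:
 *
 *	folio = try_grab_folio(page, 2, FOLL_PIN);
 *	// folio_ref_count() grew by 2 * 1024
 *
 *	folio = try_grab_folio(page, 2, FOLL_GET);
 *	// folio_ref_count() grew by just 2
 *
 * For a large folio, FOLL_PIN instead adds @refs to both the normal
 * refcount and the separate compound_pincount.
 */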
d8ddc099 169static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
170{
171 if (flags & FOLL_PIN) {
172 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
173 if (folio_test_large(folio))
174 atomic_sub(refs, folio_pincount_ptr(folio));
175 else
176 refs *= GUP_PIN_COUNTING_BIAS;
177 }
178
d8ddc099 179 folio_put_refs(folio, refs);
180}
181
182/**
183 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
184 * @page: pointer to page to be grabbed
185 * @flags: gup flags: these are the FOLL_* flag values.
186 *
187 * This might not do anything at all, depending on the flags argument.
188 *
189 * "grab" names in this file mean, "look at flags to decide whether to use
190 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
191 *
3faa52c0 192 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
ece1ed7b 193 * time. Cases: please see the try_grab_folio() documentation, with
3967db22 194 * "refs=1".
195 *
196 * Return: true for success, or if no action was required (if neither FOLL_PIN
197 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
198 * FOLL_PIN was set, but the page could not be grabbed.
199 */
200bool __must_check try_grab_page(struct page *page, unsigned int flags)
201{
202 struct folio *folio = page_folio(page);
203
c36c04c2 204 WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
205 if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
206 return false;
3faa52c0 207
c36c04c2 208 if (flags & FOLL_GET)
5fec0719 209 folio_ref_inc(folio);
c36c04c2 210 else if (flags & FOLL_PIN) {
c36c04c2 211 /*
5fec0719 212 * Similar to try_grab_folio(): be sure to *also*
213 * increment the normal page refcount field at least once,
214 * so that the page really is pinned.
c36c04c2 215 */
216 if (folio_test_large(folio)) {
217 folio_ref_add(folio, 1);
218 atomic_add(1, folio_pincount_ptr(folio));
8ea2979c 219 } else {
5fec0719 220 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
8ea2979c 221 }
c36c04c2 222
5fec0719 223 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
224 }
225
226 return true;
227}
228
229/**
230 * unpin_user_page() - release a dma-pinned page
231 * @page: pointer to page to be released
232 *
233 * Pages that were pinned via pin_user_pages*() must be released via either
234 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
235 * that such pages can be separately tracked and uniquely handled. In
236 * particular, interactions with RDMA and filesystems need special handling.
237 */
238void unpin_user_page(struct page *page)
239{
b6a2619c 240 sanity_check_pinned_pages(&page, 1);
d8ddc099 241 gup_put_folio(page_folio(page), 1, FOLL_PIN);
242}
243EXPORT_SYMBOL(unpin_user_page);
244
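/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * pin/unpin pairing as seen from a driver; do_dma_to() is a hypothetical
 * placeholder for whatever work needs the pinned pages:
 *
 *	nr = pin_user_pages_fast(addr, want, FOLL_WRITE, pages);
 *	if (nr > 0) {
 *		do_dma_to(pages, nr);
 *		while (nr--)
 *			unpin_user_page(pages[nr]);
 *	}
 */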
659508f9 245static inline struct folio *gup_folio_range_next(struct page *start,
8f39f5fc 246 unsigned long npages, unsigned long i, unsigned int *ntails)
458a4f78 247{
248 struct page *next = nth_page(start, i);
249 struct folio *folio = page_folio(next);
250 unsigned int nr = 1;
251
659508f9 252 if (folio_test_large(folio))
4c654229 253 nr = min_t(unsigned int, npages - i,
659508f9 254 folio_nr_pages(folio) - folio_page_idx(folio, next));
458a4f78 255
458a4f78 256 *ntails = nr;
659508f9 257 return folio;
258}
259
12521c76 260static inline struct folio *gup_folio_next(struct page **list,
28297dbc 261 unsigned long npages, unsigned long i, unsigned int *ntails)
8745d7f6 262{
12521c76 263 struct folio *folio = page_folio(list[i]);
264 unsigned int nr;
265
8745d7f6 266 for (nr = i + 1; nr < npages; nr++) {
12521c76 267 if (page_folio(list[nr]) != folio)
268 break;
269 }
270
8745d7f6 271 *ntails = nr - i;
12521c76 272 return folio;
273}
274
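/*
 * Editor's illustrative note (not part of the original file): if the first
 * three entries of pages[] sit in one folio and the fourth in another, a
 * caller loop such as
 *
 *	for (i = 0; i < npages; i += nr)
 *		folio = gup_folio_next(pages, npages, i, &nr);
 *
 * sees nr == 3 on the first pass and nr == 1 on the second, so references
 * can be dropped one folio at a time rather than one page at a time.
 */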
fc1d8e7c 275/**
f1f6a7dd 276 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
2d15eb31 277 * @pages: array of pages to be maybe marked dirty, and definitely released.
fc1d8e7c 278 * @npages: number of pages in the @pages array.
2d15eb31 279 * @make_dirty: whether to mark the pages dirty
280 *
281 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
282 * variants called on that page.
283 *
284 * For each page in the @pages array, make that page (or its head page, if a
2d15eb31 285 * compound page) dirty, if @make_dirty is true, and if the page was previously
286 * listed as clean. In any case, releases all pages using unpin_user_page(),
287 * possibly via unpin_user_pages(), for the non-dirty case.
fc1d8e7c 288 *
f1f6a7dd 289 * Please see the unpin_user_page() documentation for details.
fc1d8e7c 290 *
2d15eb31 291 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
292 * required, then the caller should a) verify that this is really correct,
293 * because _lock() is usually required, and b) hand code it:
f1f6a7dd 294 * set_page_dirty(), unpin_user_page().
295 *
296 */
297void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
298 bool make_dirty)
fc1d8e7c 299{
300 unsigned long i;
301 struct folio *folio;
302 unsigned int nr;
2d15eb31 303
304 if (!make_dirty) {
f1f6a7dd 305 unpin_user_pages(pages, npages);
2d15eb31 306 return;
307 }
308
b6a2619c 309 sanity_check_pinned_pages(pages, npages);
310 for (i = 0; i < npages; i += nr) {
311 folio = gup_folio_next(pages, npages, i, &nr);
2d15eb31 312 /*
313 * Checking PageDirty at this point may race with
314 * clear_page_dirty_for_io(), but that's OK. Two key
315 * cases:
316 *
317 * 1) This code sees the page as already dirty, so it
318 * skips the call to set_page_dirty(). That could happen
319 * because clear_page_dirty_for_io() called
320 * page_mkclean(), followed by set_page_dirty().
321 * However, now the page is going to get written back,
322 * which meets the original intention of setting it
323 * dirty, so all is well: clear_page_dirty_for_io() goes
324 * on to call TestClearPageDirty(), and write the page
325 * back.
326 *
327 * 2) This code sees the page as clean, so it calls
328 * set_page_dirty(). The page stays dirty, despite being
329 * written back, so it gets written back again in the
330 * next writeback cycle. This is harmless.
331 */
332 if (!folio_test_dirty(folio)) {
333 folio_lock(folio);
334 folio_mark_dirty(folio);
335 folio_unlock(folio);
336 }
337 gup_put_folio(folio, nr, FOLL_PIN);
2d15eb31 338 }
fc1d8e7c 339}
f1f6a7dd 340EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
fc1d8e7c 341
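/*
 * Editor's illustrative sketch (not part of the original file): the
 * dirty-on-release variant above collapses the usual "mark dirty, then
 * unpin" sequence after a device has written into pinned pages:
 *
 *	nr = pin_user_pages_fast(addr, want, FOLL_WRITE, pages);
 *	if (nr > 0) {
 *		// ...device DMAs data into the pages...
 *		unpin_user_pages_dirty_lock(pages, nr, true);
 *	}
 */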
342/**
343 * unpin_user_page_range_dirty_lock() - release and optionally dirty
344 * gup-pinned page range
345 *
346 * @page: the starting page of a range maybe marked dirty, and definitely released.
347 * @npages: number of consecutive pages to release.
348 * @make_dirty: whether to mark the pages dirty
349 *
350 * "gup-pinned page range" refers to a range of pages that has had one of the
351 * pin_user_pages() variants called on that page.
352 *
353 * For the page ranges defined by [page .. page+npages-1], make that range (or
354 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
355 * page range was previously listed as clean.
356 *
357 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
358 * required, then the caller should a) verify that this is really correct,
359 * because _lock() is usually required, and b) hand code it:
360 * set_page_dirty(), unpin_user_page().
361 *
362 */
363void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
364 bool make_dirty)
365{
366 unsigned long i;
367 struct folio *folio;
368 unsigned int nr;
369
370 for (i = 0; i < npages; i += nr) {
371 folio = gup_folio_range_next(page, npages, i, &nr);
372 if (make_dirty && !folio_test_dirty(folio)) {
373 folio_lock(folio);
374 folio_mark_dirty(folio);
375 folio_unlock(folio);
376 }
377 gup_put_folio(folio, nr, FOLL_PIN);
378 }
379}
380EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
381
382static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
383{
384 unsigned long i;
385 struct folio *folio;
386 unsigned int nr;
387
388 /*
389 * Don't perform any sanity checks because we might have raced with
390 * fork() and some anonymous pages might now actually be shared --
391 * which is why we're unpinning after all.
392 */
393 for (i = 0; i < npages; i += nr) {
394 folio = gup_folio_next(pages, npages, i, &nr);
395 gup_put_folio(folio, nr, FOLL_PIN);
396 }
397}
398
fc1d8e7c 399/**
f1f6a7dd 400 * unpin_user_pages() - release an array of gup-pinned pages.
401 * @pages: array of pages to be marked dirty and released.
402 * @npages: number of pages in the @pages array.
403 *
f1f6a7dd 404 * For each page in the @pages array, release the page using unpin_user_page().
fc1d8e7c 405 *
f1f6a7dd 406 * Please see the unpin_user_page() documentation for details.
fc1d8e7c 407 */
f1f6a7dd 408void unpin_user_pages(struct page **pages, unsigned long npages)
fc1d8e7c 409{
410 unsigned long i;
411 struct folio *folio;
412 unsigned int nr;
fc1d8e7c 413
414 /*
415 * If this WARN_ON() fires, then the system *might* be leaking pages (by
416 * leaving them pinned), but probably not. More likely, gup/pup returned
417 * a hard -ERRNO error to the caller, who erroneously passed it here.
418 */
419 if (WARN_ON(IS_ERR_VALUE(npages)))
420 return;
31b912de 421
b6a2619c 422 sanity_check_pinned_pages(pages, npages);
423 for (i = 0; i < npages; i += nr) {
424 folio = gup_folio_next(pages, npages, i, &nr);
425 gup_put_folio(folio, nr, FOLL_PIN);
e7602748 426 }
fc1d8e7c 427}
f1f6a7dd 428EXPORT_SYMBOL(unpin_user_pages);
fc1d8e7c 429
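/*
 * Editor's illustrative note (not part of the original file): the
 * IS_ERR_VALUE() check above guards against callers forwarding a negative
 * gup/pup return value as a page count, e.g.:
 *
 *	ret = pin_user_pages_fast(addr, want, 0, pages);
 *	// ... ret is not checked ...
 *	unpin_user_pages(pages, ret);	// wrong if ret is a -errno
 */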
430/*
431 * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's
432 * lifecycle. Avoid setting the bit unless necessary, or it might cause write
433 * cache bouncing on large SMP machines for concurrent pinned gups.
434 */
435static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
436{
437 if (!test_bit(MMF_HAS_PINNED, mm_flags))
438 set_bit(MMF_HAS_PINNED, mm_flags);
439}
440
050a9adc 441#ifdef CONFIG_MMU
69e68b4f
KS
442static struct page *no_page_table(struct vm_area_struct *vma,
443 unsigned int flags)
4bbd4c77 444{
69e68b4f
KS
445 /*
446 * When core dumping an enormous anonymous area that nobody
447 * has touched so far, we don't want to allocate unnecessary pages or
448 * page tables. Return error instead of NULL to skip handle_mm_fault,
449 * then get_dump_page() will return NULL to leave a hole in the dump.
450 * But we can only make this optimization where a hole would surely
451 * be zero-filled if handle_mm_fault() actually did handle it.
452 */
a0137f16
AK
453 if ((flags & FOLL_DUMP) &&
454 (vma_is_anonymous(vma) || !vma->vm_ops->fault))
69e68b4f
KS
455 return ERR_PTR(-EFAULT);
456 return NULL;
457}
4bbd4c77 458
1027e443
KS
459static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
460 pte_t *pte, unsigned int flags)
461{
1027e443
KS
462 if (flags & FOLL_TOUCH) {
463 pte_t entry = *pte;
464
465 if (flags & FOLL_WRITE)
466 entry = pte_mkdirty(entry);
467 entry = pte_mkyoung(entry);
468
469 if (!pte_same(*pte, entry)) {
470 set_pte_at(vma->vm_mm, address, pte, entry);
471 update_mmu_cache(vma, address, pte);
472 }
473 }
474
475 /* Proper page table entry exists, but no corresponding struct page */
476 return -EEXIST;
477}
478
19be0eaf 479/*
a308c71b
PX
480 * FOLL_FORCE can write to even unwritable pte's, but only
481 * after we've gone through a COW cycle and they are dirty.
19be0eaf
LT
482 */
483static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
484{
a308c71b
PX
485 return pte_write(pte) ||
486 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
19be0eaf
LT
487}
488
69e68b4f 489static struct page *follow_page_pte(struct vm_area_struct *vma,
df06b37f
KB
490 unsigned long address, pmd_t *pmd, unsigned int flags,
491 struct dev_pagemap **pgmap)
69e68b4f
KS
492{
493 struct mm_struct *mm = vma->vm_mm;
494 struct page *page;
495 spinlock_t *ptl;
496 pte_t *ptep, pte;
f28d4363 497 int ret;
4bbd4c77 498
eddb1c22
JH
499 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
500 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
501 (FOLL_PIN | FOLL_GET)))
502 return ERR_PTR(-EINVAL);
69e68b4f 503retry:
4bbd4c77 504 if (unlikely(pmd_bad(*pmd)))
69e68b4f 505 return no_page_table(vma, flags);
4bbd4c77
KS
506
507 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
4bbd4c77
KS
508 pte = *ptep;
509 if (!pte_present(pte)) {
510 swp_entry_t entry;
511 /*
512 * KSM's break_ksm() relies upon recognizing a ksm page
513 * even while it is being migrated, so for that case we
514 * need migration_entry_wait().
515 */
516 if (likely(!(flags & FOLL_MIGRATION)))
517 goto no_page;
0661a336 518 if (pte_none(pte))
4bbd4c77
KS
519 goto no_page;
520 entry = pte_to_swp_entry(pte);
521 if (!is_migration_entry(entry))
522 goto no_page;
523 pte_unmap_unlock(ptep, ptl);
524 migration_entry_wait(mm, pmd, address);
69e68b4f 525 goto retry;
4bbd4c77 526 }
8a0516ed 527 if ((flags & FOLL_NUMA) && pte_protnone(pte))
4bbd4c77 528 goto no_page;
19be0eaf 529 if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
69e68b4f
KS
530 pte_unmap_unlock(ptep, ptl);
531 return NULL;
532 }
4bbd4c77
KS
533
534 page = vm_normal_page(vma, address, pte);
3faa52c0 535 if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
3565fce3 536 /*
3faa52c0
JH
537 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
538 * case since they are only valid while holding the pgmap
539 * reference.
3565fce3 540 */
df06b37f
KB
541 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
542 if (*pgmap)
3565fce3
DW
543 page = pte_page(pte);
544 else
545 goto no_page;
546 } else if (unlikely(!page)) {
1027e443
KS
547 if (flags & FOLL_DUMP) {
548 /* Avoid special (like zero) pages in core dumps */
549 page = ERR_PTR(-EFAULT);
550 goto out;
551 }
552
553 if (is_zero_pfn(pte_pfn(pte))) {
554 page = pte_page(pte);
555 } else {
1027e443
KS
556 ret = follow_pfn_pte(vma, address, ptep, flags);
557 page = ERR_PTR(ret);
558 goto out;
559 }
4bbd4c77
KS
560 }
561
a7f22660
DH
562 if (!pte_write(pte) && gup_must_unshare(flags, page)) {
563 page = ERR_PTR(-EMLINK);
564 goto out;
565 }
b6a2619c
DH
566
567 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
568 !PageAnonExclusive(page), page);
569
3faa52c0
JH
570 /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
571 if (unlikely(!try_grab_page(page, flags))) {
572 page = ERR_PTR(-ENOMEM);
573 goto out;
8fde12ca 574 }
f28d4363
CI
575 /*
576 * We need to make the page accessible if and only if we are going
577 * to access its content (the FOLL_PIN case). Please see
578 * Documentation/core-api/pin_user_pages.rst for details.
579 */
580 if (flags & FOLL_PIN) {
581 ret = arch_make_page_accessible(page);
582 if (ret) {
583 unpin_user_page(page);
584 page = ERR_PTR(ret);
585 goto out;
586 }
587 }
4bbd4c77
KS
588 if (flags & FOLL_TOUCH) {
589 if ((flags & FOLL_WRITE) &&
590 !pte_dirty(pte) && !PageDirty(page))
591 set_page_dirty(page);
592 /*
593 * pte_mkyoung() would be more correct here, but atomic care
594 * is needed to avoid losing the dirty bit: it is easier to use
595 * mark_page_accessed().
596 */
597 mark_page_accessed(page);
598 }
1027e443 599out:
4bbd4c77 600 pte_unmap_unlock(ptep, ptl);
4bbd4c77 601 return page;
4bbd4c77
KS
602no_page:
603 pte_unmap_unlock(ptep, ptl);
604 if (!pte_none(pte))
69e68b4f
KS
605 return NULL;
606 return no_page_table(vma, flags);
607}
608
080dbb61
AK
609static struct page *follow_pmd_mask(struct vm_area_struct *vma,
610 unsigned long address, pud_t *pudp,
df06b37f
KB
611 unsigned int flags,
612 struct follow_page_context *ctx)
69e68b4f 613{
68827280 614 pmd_t *pmd, pmdval;
69e68b4f
KS
615 spinlock_t *ptl;
616 struct page *page;
617 struct mm_struct *mm = vma->vm_mm;
618
080dbb61 619 pmd = pmd_offset(pudp, address);
68827280
HY
620 /*
621 * The READ_ONCE() will stabilize the pmdval in a register or
622 * on the stack so that it will stop changing under the code.
623 */
624 pmdval = READ_ONCE(*pmd);
625 if (pmd_none(pmdval))
69e68b4f 626 return no_page_table(vma, flags);
be9d3045 627 if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
e66f17ff
NH
628 page = follow_huge_pmd(mm, address, pmd, flags);
629 if (page)
630 return page;
631 return no_page_table(vma, flags);
69e68b4f 632 }
68827280 633 if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
4dc71451 634 page = follow_huge_pd(vma, address,
68827280 635 __hugepd(pmd_val(pmdval)), flags,
4dc71451
AK
636 PMD_SHIFT);
637 if (page)
638 return page;
639 return no_page_table(vma, flags);
640 }
84c3fc4e 641retry:
68827280 642 if (!pmd_present(pmdval)) {
28b0ee3f
LX
643 /*
644 * Should never reach here, if thp migration is not supported;
645 * Otherwise, it must be a thp migration entry.
646 */
647 VM_BUG_ON(!thp_migration_supported() ||
648 !is_pmd_migration_entry(pmdval));
649
84c3fc4e
ZY
650 if (likely(!(flags & FOLL_MIGRATION)))
651 return no_page_table(vma, flags);
28b0ee3f
LX
652
653 pmd_migration_entry_wait(mm, pmd);
68827280
HY
654 pmdval = READ_ONCE(*pmd);
655 /*
656 * MADV_DONTNEED may convert the pmd to null because
c1e8d7c6 657 * mmap_lock is held in read mode
68827280
HY
658 */
659 if (pmd_none(pmdval))
660 return no_page_table(vma, flags);
84c3fc4e
ZY
661 goto retry;
662 }
68827280 663 if (pmd_devmap(pmdval)) {
3565fce3 664 ptl = pmd_lock(mm, pmd);
df06b37f 665 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
3565fce3
DW
666 spin_unlock(ptl);
667 if (page)
668 return page;
669 }
68827280 670 if (likely(!pmd_trans_huge(pmdval)))
df06b37f 671 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
6742d293 672
68827280 673 if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
db08f203
AK
674 return no_page_table(vma, flags);
675
84c3fc4e 676retry_locked:
6742d293 677 ptl = pmd_lock(mm, pmd);
68827280
HY
678 if (unlikely(pmd_none(*pmd))) {
679 spin_unlock(ptl);
680 return no_page_table(vma, flags);
681 }
84c3fc4e
ZY
682 if (unlikely(!pmd_present(*pmd))) {
683 spin_unlock(ptl);
684 if (likely(!(flags & FOLL_MIGRATION)))
685 return no_page_table(vma, flags);
686 pmd_migration_entry_wait(mm, pmd);
687 goto retry_locked;
688 }
6742d293
KS
689 if (unlikely(!pmd_trans_huge(*pmd))) {
690 spin_unlock(ptl);
df06b37f 691 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
6742d293 692 }
4066c119 693 if (flags & FOLL_SPLIT_PMD) {
6742d293
KS
694 int ret;
695 page = pmd_page(*pmd);
696 if (is_huge_zero_page(page)) {
697 spin_unlock(ptl);
698 ret = 0;
78ddc534 699 split_huge_pmd(vma, pmd, address);
337d9abf
NH
700 if (pmd_trans_unstable(pmd))
701 ret = -EBUSY;
4066c119 702 } else {
bfe7b00d
SL
703 spin_unlock(ptl);
704 split_huge_pmd(vma, pmd, address);
705 ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
6742d293
KS
706 }
707
708 return ret ? ERR_PTR(ret) :
df06b37f 709 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
69e68b4f 710 }
6742d293
KS
711 page = follow_trans_huge_pmd(vma, address, pmd, flags);
712 spin_unlock(ptl);
df06b37f 713 ctx->page_mask = HPAGE_PMD_NR - 1;
6742d293 714 return page;
4bbd4c77
KS
715}
716
080dbb61
AK
717static struct page *follow_pud_mask(struct vm_area_struct *vma,
718 unsigned long address, p4d_t *p4dp,
df06b37f
KB
719 unsigned int flags,
720 struct follow_page_context *ctx)
080dbb61
AK
721{
722 pud_t *pud;
723 spinlock_t *ptl;
724 struct page *page;
725 struct mm_struct *mm = vma->vm_mm;
726
727 pud = pud_offset(p4dp, address);
728 if (pud_none(*pud))
729 return no_page_table(vma, flags);
be9d3045 730 if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
080dbb61
AK
731 page = follow_huge_pud(mm, address, pud, flags);
732 if (page)
733 return page;
734 return no_page_table(vma, flags);
735 }
4dc71451
AK
736 if (is_hugepd(__hugepd(pud_val(*pud)))) {
737 page = follow_huge_pd(vma, address,
738 __hugepd(pud_val(*pud)), flags,
739 PUD_SHIFT);
740 if (page)
741 return page;
742 return no_page_table(vma, flags);
743 }
080dbb61
AK
744 if (pud_devmap(*pud)) {
745 ptl = pud_lock(mm, pud);
df06b37f 746 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
080dbb61
AK
747 spin_unlock(ptl);
748 if (page)
749 return page;
750 }
751 if (unlikely(pud_bad(*pud)))
752 return no_page_table(vma, flags);
753
df06b37f 754 return follow_pmd_mask(vma, address, pud, flags, ctx);
080dbb61
AK
755}
756
080dbb61
AK
757static struct page *follow_p4d_mask(struct vm_area_struct *vma,
758 unsigned long address, pgd_t *pgdp,
df06b37f
KB
759 unsigned int flags,
760 struct follow_page_context *ctx)
080dbb61
AK
761{
762 p4d_t *p4d;
4dc71451 763 struct page *page;
080dbb61
AK
764
765 p4d = p4d_offset(pgdp, address);
766 if (p4d_none(*p4d))
767 return no_page_table(vma, flags);
768 BUILD_BUG_ON(p4d_huge(*p4d));
769 if (unlikely(p4d_bad(*p4d)))
770 return no_page_table(vma, flags);
771
4dc71451
AK
772 if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
773 page = follow_huge_pd(vma, address,
774 __hugepd(p4d_val(*p4d)), flags,
775 P4D_SHIFT);
776 if (page)
777 return page;
778 return no_page_table(vma, flags);
779 }
df06b37f 780 return follow_pud_mask(vma, address, p4d, flags, ctx);
080dbb61
AK
781}
782
783/**
784 * follow_page_mask - look up a page descriptor from a user-virtual address
785 * @vma: vm_area_struct mapping @address
786 * @address: virtual address to look up
787 * @flags: flags modifying lookup behaviour
78179556
MR
788 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
789 * pointer to output page_mask
080dbb61
AK
790 *
791 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
792 *
78179556
MR
793 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
794 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
795 *
a7f22660
DH
796 * When getting an anonymous page and the caller has to trigger unsharing
797 * of a shared anonymous page first, -EMLINK is returned. The caller should
798 * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
799 * relevant with FOLL_PIN and !FOLL_WRITE.
800 *
78179556
MR
801 * On output, the @ctx->page_mask is set according to the size of the page.
802 *
803 * Return: the mapped (struct page *), %NULL if no mapping exists, or
080dbb61
AK
804 * an error pointer if there is a mapping to something not represented
805 * by a page descriptor (see also vm_normal_page()).
806 */
a7030aea 807static struct page *follow_page_mask(struct vm_area_struct *vma,
080dbb61 808 unsigned long address, unsigned int flags,
df06b37f 809 struct follow_page_context *ctx)
080dbb61
AK
810{
811 pgd_t *pgd;
812 struct page *page;
813 struct mm_struct *mm = vma->vm_mm;
814
df06b37f 815 ctx->page_mask = 0;
080dbb61
AK
816
817 /* make this handle hugepd */
818 page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
819 if (!IS_ERR(page)) {
3faa52c0 820 WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
080dbb61
AK
821 return page;
822 }
823
824 pgd = pgd_offset(mm, address);
825
826 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
827 return no_page_table(vma, flags);
828
faaa5b62
AK
829 if (pgd_huge(*pgd)) {
830 page = follow_huge_pgd(mm, address, pgd, flags);
831 if (page)
832 return page;
833 return no_page_table(vma, flags);
834 }
4dc71451
AK
835 if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
836 page = follow_huge_pd(vma, address,
837 __hugepd(pgd_val(*pgd)), flags,
838 PGDIR_SHIFT);
839 if (page)
840 return page;
841 return no_page_table(vma, flags);
842 }
faaa5b62 843
df06b37f
KB
844 return follow_p4d_mask(vma, address, pgd, flags, ctx);
845}
846
847struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
848 unsigned int foll_flags)
849{
850 struct follow_page_context ctx = { NULL };
851 struct page *page;
852
1507f512
MR
853 if (vma_is_secretmem(vma))
854 return NULL;
855
8909691b
DH
856 if (foll_flags & FOLL_PIN)
857 return NULL;
858
df06b37f
KB
859 page = follow_page_mask(vma, address, foll_flags, &ctx);
860 if (ctx.pgmap)
861 put_dev_pagemap(ctx.pgmap);
862 return page;
080dbb61
AK
863}
864
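/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * in-kernel use of follow_page() under the mmap_lock; mm, addr and the
 * surrounding error handling are hypothetical:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (vma && addr >= vma->vm_start) {
 *		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 *		if (!IS_ERR_OR_NULL(page)) {
 *			// ...examine the page...
 *			put_page(page);
 *		}
 *	}
 *	mmap_read_unlock(mm);
 */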
f2b495ca
KS
865static int get_gate_page(struct mm_struct *mm, unsigned long address,
866 unsigned int gup_flags, struct vm_area_struct **vma,
867 struct page **page)
868{
869 pgd_t *pgd;
c2febafc 870 p4d_t *p4d;
f2b495ca
KS
871 pud_t *pud;
872 pmd_t *pmd;
873 pte_t *pte;
874 int ret = -EFAULT;
875
876 /* user gate pages are read-only */
877 if (gup_flags & FOLL_WRITE)
878 return -EFAULT;
879 if (address > TASK_SIZE)
880 pgd = pgd_offset_k(address);
881 else
882 pgd = pgd_offset_gate(mm, address);
b5d1c39f
AL
883 if (pgd_none(*pgd))
884 return -EFAULT;
c2febafc 885 p4d = p4d_offset(pgd, address);
b5d1c39f
AL
886 if (p4d_none(*p4d))
887 return -EFAULT;
c2febafc 888 pud = pud_offset(p4d, address);
b5d1c39f
AL
889 if (pud_none(*pud))
890 return -EFAULT;
f2b495ca 891 pmd = pmd_offset(pud, address);
84c3fc4e 892 if (!pmd_present(*pmd))
f2b495ca
KS
893 return -EFAULT;
894 VM_BUG_ON(pmd_trans_huge(*pmd));
895 pte = pte_offset_map(pmd, address);
896 if (pte_none(*pte))
897 goto unmap;
898 *vma = get_gate_vma(mm);
899 if (!page)
900 goto out;
901 *page = vm_normal_page(*vma, address, *pte);
902 if (!*page) {
903 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
904 goto unmap;
905 *page = pte_page(*pte);
906 }
9fa2dd94 907 if (unlikely(!try_grab_page(*page, gup_flags))) {
8fde12ca
LT
908 ret = -ENOMEM;
909 goto unmap;
910 }
f2b495ca
KS
911out:
912 ret = 0;
913unmap:
914 pte_unmap(pte);
915 return ret;
916}
917
9a95f3cf 918/*
c1e8d7c6
ML
919 * mmap_lock must be held on entry. If @locked != NULL and *@flags
920 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
4f6da934 921 * is, *@locked will be set to 0 and -EBUSY returned.
9a95f3cf 922 */
64019a2e 923static int faultin_page(struct vm_area_struct *vma,
a7f22660
DH
924 unsigned long address, unsigned int *flags, bool unshare,
925 int *locked)
16744483 926{
16744483 927 unsigned int fault_flags = 0;
2b740303 928 vm_fault_t ret;
16744483 929
55b8fe70
AG
930 if (*flags & FOLL_NOFAULT)
931 return -EFAULT;
16744483
KS
932 if (*flags & FOLL_WRITE)
933 fault_flags |= FAULT_FLAG_WRITE;
1b2ee126
DH
934 if (*flags & FOLL_REMOTE)
935 fault_flags |= FAULT_FLAG_REMOTE;
4f6da934 936 if (locked)
71335f37 937 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
16744483
KS
938 if (*flags & FOLL_NOWAIT)
939 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
234b239b 940 if (*flags & FOLL_TRIED) {
4426e945
PX
941 /*
942 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
943 * can co-exist
944 */
234b239b
ALC
945 fault_flags |= FAULT_FLAG_TRIED;
946 }
a7f22660
DH
947 if (unshare) {
948 fault_flags |= FAULT_FLAG_UNSHARE;
949 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
950 VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
951 }
16744483 952
bce617ed 953 ret = handle_mm_fault(vma, address, fault_flags, NULL);
16744483 954 if (ret & VM_FAULT_ERROR) {
9a291a7c
JM
955 int err = vm_fault_to_errno(ret, *flags);
956
957 if (err)
958 return err;
16744483
KS
959 BUG();
960 }
961
16744483 962 if (ret & VM_FAULT_RETRY) {
4f6da934
PX
963 if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
964 *locked = 0;
16744483
KS
965 return -EBUSY;
966 }
967
968 /*
969 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
970 * necessary, even if maybe_mkwrite decided not to set pte_write. We
971 * can thus safely do subsequent page lookups as if they were reads.
972 * But only do so when looping for pte_write is futile: in some cases
973 * userspace may also be wanting to write to the gotten user page,
974 * which a read fault here might prevent (a readonly page might get
975 * reCOWed by userspace write).
976 */
977 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
2923117b 978 *flags |= FOLL_COW;
16744483
KS
979 return 0;
980}
981
fa5bb209
KS
982static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
983{
984 vm_flags_t vm_flags = vma->vm_flags;
1b2ee126
DH
985 int write = (gup_flags & FOLL_WRITE);
986 int foreign = (gup_flags & FOLL_REMOTE);
fa5bb209
KS
987
988 if (vm_flags & (VM_IO | VM_PFNMAP))
989 return -EFAULT;
990
7f7ccc2c
WT
991 if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
992 return -EFAULT;
993
52650c8b
JG
994 if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
995 return -EOPNOTSUPP;
996
1507f512
MR
997 if (vma_is_secretmem(vma))
998 return -EFAULT;
999
1b2ee126 1000 if (write) {
fa5bb209
KS
1001 if (!(vm_flags & VM_WRITE)) {
1002 if (!(gup_flags & FOLL_FORCE))
1003 return -EFAULT;
1004 /*
1005 * We used to let the write,force case do COW in a
1006 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
1007 * set a breakpoint in a read-only mapping of an
1008 * executable, without corrupting the file (yet only
1009 * when that file had been opened for writing!).
1010 * Anon pages in shared mappings are surprising: now
1011 * just reject it.
1012 */
46435364 1013 if (!is_cow_mapping(vm_flags))
fa5bb209 1014 return -EFAULT;
fa5bb209
KS
1015 }
1016 } else if (!(vm_flags & VM_READ)) {
1017 if (!(gup_flags & FOLL_FORCE))
1018 return -EFAULT;
1019 /*
1020 * Is there actually any vma we can reach here which does not
1021 * have VM_MAYREAD set?
1022 */
1023 if (!(vm_flags & VM_MAYREAD))
1024 return -EFAULT;
1025 }
d61172b4
DH
1026 /*
1027 * gups are always data accesses, not instruction
1028 * fetches, so execute=false here
1029 */
1030 if (!arch_vma_access_permitted(vma, write, false, foreign))
33a709b2 1031 return -EFAULT;
fa5bb209
KS
1032 return 0;
1033}
1034
4bbd4c77
KS
1035/**
1036 * __get_user_pages() - pin user pages in memory
4bbd4c77
KS
1037 * @mm: mm_struct of target mm
1038 * @start: starting user address
1039 * @nr_pages: number of pages from start to pin
1040 * @gup_flags: flags modifying pin behaviour
1041 * @pages: array that receives pointers to the pages pinned.
1042 * Should be at least nr_pages long. Or NULL, if caller
1043 * only intends to ensure the pages are faulted in.
1044 * @vmas: array of pointers to vmas corresponding to each page.
1045 * Or NULL if the caller does not require them.
c1e8d7c6 1046 * @locked: whether we're still with the mmap_lock held
4bbd4c77 1047 *
d2dfbe47
LX
1048 * Returns either number of pages pinned (which may be less than the
1049 * number requested), or an error. Details about the return value:
1050 *
1051 * -- If nr_pages is 0, returns 0.
1052 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1053 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1054 * pages pinned. Again, this may be less than nr_pages.
2d3a36a4 1055 * -- 0 return value is possible when the fault would need to be retried.
d2dfbe47
LX
1056 *
1057 * The caller is responsible for releasing returned @pages, via put_page().
1058 *
c1e8d7c6 1059 * @vmas are valid only as long as mmap_lock is held.
4bbd4c77 1060 *
c1e8d7c6 1061 * Must be called with mmap_lock held. It may be released. See below.
4bbd4c77
KS
1062 *
1063 * __get_user_pages walks a process's page tables and takes a reference to
1064 * each struct page that each user address corresponds to at a given
1065 * instant. That is, it takes the page that would be accessed if a user
1066 * thread accesses the given user virtual address at that instant.
1067 *
1068 * This does not guarantee that the page exists in the user mappings when
1069 * __get_user_pages returns, and there may even be a completely different
1070 * page there in some cases (e.g. if mmapped pagecache has been invalidated
1071 * and subsequently re-faulted). However it does guarantee that the page
1072 * won't be freed completely. And mostly callers simply care that the page
1073 * contains data that was valid *at some point in time*. Typically, an IO
1074 * or similar operation cannot guarantee anything stronger anyway because
1075 * locks can't be held over the syscall boundary.
1076 *
1077 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1078 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1079 * appropriate) must be called after the page is finished with, and
1080 * before put_page is called.
1081 *
c1e8d7c6 1082 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
4f6da934
PX
1083 * released by an up_read(). That can happen if @gup_flags does not
1084 * have FOLL_NOWAIT.
9a95f3cf 1085 *
4f6da934 1086 * A caller using such a combination of @locked and @gup_flags
c1e8d7c6 1087 * must therefore hold the mmap_lock for reading only, and recognize
9a95f3cf
PC
1088 * when it's been released. Otherwise, it must be held for either
1089 * reading or writing and will not be released.
4bbd4c77
KS
1090 *
1091 * In most cases, get_user_pages or get_user_pages_fast should be used
1092 * instead of __get_user_pages. __get_user_pages should be used only if
1093 * you need some special @gup_flags.
1094 */
64019a2e 1095static long __get_user_pages(struct mm_struct *mm,
4bbd4c77
KS
1096 unsigned long start, unsigned long nr_pages,
1097 unsigned int gup_flags, struct page **pages,
4f6da934 1098 struct vm_area_struct **vmas, int *locked)
4bbd4c77 1099{
df06b37f 1100 long ret = 0, i = 0;
fa5bb209 1101 struct vm_area_struct *vma = NULL;
df06b37f 1102 struct follow_page_context ctx = { NULL };
4bbd4c77
KS
1103
1104 if (!nr_pages)
1105 return 0;
1106
f9652594
AK
1107 start = untagged_addr(start);
1108
eddb1c22 1109 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
4bbd4c77
KS
1110
1111 /*
1112 * If FOLL_FORCE is set then do not force a full fault as the hinting
1113 * fault information is unrelated to the reference behaviour of a task
1114 * using the address space
1115 */
1116 if (!(gup_flags & FOLL_FORCE))
1117 gup_flags |= FOLL_NUMA;
1118
4bbd4c77 1119 do {
fa5bb209
KS
1120 struct page *page;
1121 unsigned int foll_flags = gup_flags;
1122 unsigned int page_increm;
1123
1124 /* first iteration or cross vma bound */
1125 if (!vma || start >= vma->vm_end) {
1126 vma = find_extend_vma(mm, start);
1127 if (!vma && in_gate_area(mm, start)) {
fa5bb209
KS
1128 ret = get_gate_page(mm, start & PAGE_MASK,
1129 gup_flags, &vma,
1130 pages ? &pages[i] : NULL);
1131 if (ret)
08be37b7 1132 goto out;
df06b37f 1133 ctx.page_mask = 0;
fa5bb209
KS
1134 goto next_page;
1135 }
4bbd4c77 1136
52650c8b 1137 if (!vma) {
df06b37f
KB
1138 ret = -EFAULT;
1139 goto out;
1140 }
52650c8b
JG
1141 ret = check_vma_flags(vma, gup_flags);
1142 if (ret)
1143 goto out;
1144
fa5bb209
KS
1145 if (is_vm_hugetlb_page(vma)) {
1146 i = follow_hugetlb_page(mm, vma, pages, vmas,
1147 &start, &nr_pages, i,
a308c71b 1148 gup_flags, locked);
ad415db8
PX
1149 if (locked && *locked == 0) {
1150 /*
1151 * We've got a VM_FAULT_RETRY
c1e8d7c6 1152 * and we've lost mmap_lock.
ad415db8
PX
1153 * We must stop here.
1154 */
1155 BUG_ON(gup_flags & FOLL_NOWAIT);
ad415db8
PX
1156 goto out;
1157 }
fa5bb209 1158 continue;
4bbd4c77 1159 }
fa5bb209
KS
1160 }
1161retry:
1162 /*
1163 * If we have a pending SIGKILL, don't keep faulting pages and
1164 * potentially allocating memory.
1165 */
fa45f116 1166 if (fatal_signal_pending(current)) {
d180870d 1167 ret = -EINTR;
df06b37f
KB
1168 goto out;
1169 }
fa5bb209 1170 cond_resched();
df06b37f
KB
1171
1172 page = follow_page_mask(vma, start, foll_flags, &ctx);
a7f22660
DH
1173 if (!page || PTR_ERR(page) == -EMLINK) {
1174 ret = faultin_page(vma, start, &foll_flags,
1175 PTR_ERR(page) == -EMLINK, locked);
fa5bb209
KS
1176 switch (ret) {
1177 case 0:
1178 goto retry;
df06b37f
KB
1179 case -EBUSY:
1180 ret = 0;
e4a9bc58 1181 fallthrough;
fa5bb209
KS
1182 case -EFAULT:
1183 case -ENOMEM:
1184 case -EHWPOISON:
df06b37f 1185 goto out;
4bbd4c77 1186 }
fa5bb209 1187 BUG();
1027e443
KS
1188 } else if (PTR_ERR(page) == -EEXIST) {
1189 /*
1190 * Proper page table entry exists, but no corresponding
65462462
JH
1191 * struct page. If the caller expects **pages to be
1192 * filled in, bail out now, because that can't be done
1193 * for this page.
1027e443 1194 */
65462462
JH
1195 if (pages) {
1196 ret = PTR_ERR(page);
1197 goto out;
1198 }
1199
1027e443
KS
1200 goto next_page;
1201 } else if (IS_ERR(page)) {
df06b37f
KB
1202 ret = PTR_ERR(page);
1203 goto out;
1027e443 1204 }
fa5bb209
KS
1205 if (pages) {
1206 pages[i] = page;
1207 flush_anon_page(vma, page, start);
1208 flush_dcache_page(page);
df06b37f 1209 ctx.page_mask = 0;
4bbd4c77 1210 }
4bbd4c77 1211next_page:
fa5bb209
KS
1212 if (vmas) {
1213 vmas[i] = vma;
df06b37f 1214 ctx.page_mask = 0;
fa5bb209 1215 }
df06b37f 1216 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
fa5bb209
KS
1217 if (page_increm > nr_pages)
1218 page_increm = nr_pages;
1219 i += page_increm;
1220 start += page_increm * PAGE_SIZE;
1221 nr_pages -= page_increm;
4bbd4c77 1222 } while (nr_pages);
df06b37f
KB
1223out:
1224 if (ctx.pgmap)
1225 put_dev_pagemap(ctx.pgmap);
1226 return i ? i : ret;
4bbd4c77 1227}
4bbd4c77 1228
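/*
 * Editor's illustrative sketch (not part of the original file): as noted
 * above, most code should use the higher-level helpers instead of
 * __get_user_pages(), for example:
 *
 *	got = get_user_pages_fast(addr, want, FOLL_WRITE, pages);
 *	if (got > 0) {
 *		// ...read or write the page contents...
 *		for (i = 0; i < got; i++) {
 *			set_page_dirty_lock(pages[i]);
 *			put_page(pages[i]);
 *		}
 *	}
 */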
771ab430
TK
1229static bool vma_permits_fault(struct vm_area_struct *vma,
1230 unsigned int fault_flags)
d4925e00 1231{
1b2ee126
DH
1232 bool write = !!(fault_flags & FAULT_FLAG_WRITE);
1233 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
33a709b2 1234 vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
d4925e00
DH
1235
1236 if (!(vm_flags & vma->vm_flags))
1237 return false;
1238
33a709b2
DH
1239 /*
1240 * The architecture might have a hardware protection
1b2ee126 1241 * mechanism other than read/write that can deny access.
d61172b4
DH
1242 *
1243 * gup always represents data access, not instruction
1244 * fetches, so execute=false here:
33a709b2 1245 */
d61172b4 1246 if (!arch_vma_access_permitted(vma, write, false, foreign))
33a709b2
DH
1247 return false;
1248
d4925e00
DH
1249 return true;
1250}
1251
adc8cb40 1252/**
4bbd4c77 1253 * fixup_user_fault() - manually resolve a user page fault
4bbd4c77
KS
1254 * @mm: mm_struct of target mm
1255 * @address: user address
1256 * @fault_flags:flags to pass down to handle_mm_fault()
c1e8d7c6 1257 * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
548b6a1e
MC
1258 * does not allow retry. If NULL, the caller must guarantee
1259 * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
4bbd4c77
KS
1260 *
1261 * This is meant to be called in the specific scenario where for locking reasons
1262 * we try to access user memory in atomic context (within a pagefault_disable()
1263 * section), this returns -EFAULT, and we want to resolve the user fault before
1264 * trying again.
1265 *
1266 * Typically this is meant to be used by the futex code.
1267 *
1268 * The main difference with get_user_pages() is that this function will
1269 * unconditionally call handle_mm_fault() which will in turn perform all the
1270 * necessary SW fixup of the dirty and young bits in the PTE, while
4a9e1cda 1271 * get_user_pages() only guarantees to update these in the struct page.
4bbd4c77
KS
1272 *
1273 * This is important for some architectures where those bits also gate the
1274 * access permission to the page because they are maintained in software. On
1275 * such architectures, gup() will not be enough to make a subsequent access
1276 * succeed.
1277 *
c1e8d7c6
ML
1278 * This function will not return with an unlocked mmap_lock. So it has not the
1279 * same semantics wrt the @mm->mmap_lock as does filemap_fault().
4bbd4c77 1280 */
64019a2e 1281int fixup_user_fault(struct mm_struct *mm,
4a9e1cda
DD
1282 unsigned long address, unsigned int fault_flags,
1283 bool *unlocked)
4bbd4c77
KS
1284{
1285 struct vm_area_struct *vma;
8fed2f3c 1286 vm_fault_t ret;
4a9e1cda 1287
f9652594
AK
1288 address = untagged_addr(address);
1289
4a9e1cda 1290 if (unlocked)
71335f37 1291 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
4bbd4c77 1292
4a9e1cda 1293retry:
4bbd4c77
KS
1294 vma = find_extend_vma(mm, address);
1295 if (!vma || address < vma->vm_start)
1296 return -EFAULT;
1297
d4925e00 1298 if (!vma_permits_fault(vma, fault_flags))
4bbd4c77
KS
1299 return -EFAULT;
1300
475f4dfc
PX
1301 if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1302 fatal_signal_pending(current))
1303 return -EINTR;
1304
bce617ed 1305 ret = handle_mm_fault(vma, address, fault_flags, NULL);
4bbd4c77 1306 if (ret & VM_FAULT_ERROR) {
9a291a7c
JM
1307 int err = vm_fault_to_errno(ret, 0);
1308
1309 if (err)
1310 return err;
4bbd4c77
KS
1311 BUG();
1312 }
4a9e1cda
DD
1313
1314 if (ret & VM_FAULT_RETRY) {
d8ed45c5 1315 mmap_read_lock(mm);
475f4dfc
PX
1316 *unlocked = true;
1317 fault_flags |= FAULT_FLAG_TRIED;
1318 goto retry;
4a9e1cda
DD
1319 }
1320
4bbd4c77
KS
1321 return 0;
1322}
add6a0cd 1323EXPORT_SYMBOL_GPL(fixup_user_fault);
4bbd4c77 1324
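/*
 * Editor's illustrative sketch (not part of the original file): the
 * futex-style usage described above, in rough form; uaddr, mm and the
 * retry label are hypothetical and error handling is trimmed:
 *
 *	pagefault_disable();
 *	ret = __get_user(val, uaddr);
 *	pagefault_enable();
 *	if (ret) {
 *		bool unlocked = false;
 *
 *		mmap_read_lock(mm);
 *		ret = fixup_user_fault(mm, (unsigned long)uaddr, 0, &unlocked);
 *		mmap_read_unlock(mm);
 *		if (!ret)
 *			goto retry;
 *	}
 */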
2d3a36a4
MH
1325/*
1326 * Please note that this function, unlike __get_user_pages(), will not
1327 * return 0 for nr_pages > 0 unless FOLL_NOWAIT is used.
1328 */
64019a2e 1329static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
f0818f47
AA
1330 unsigned long start,
1331 unsigned long nr_pages,
f0818f47
AA
1332 struct page **pages,
1333 struct vm_area_struct **vmas,
e716712f 1334 int *locked,
0fd71a56 1335 unsigned int flags)
f0818f47 1336{
f0818f47
AA
1337 long ret, pages_done;
1338 bool lock_dropped;
1339
1340 if (locked) {
1341 /* if VM_FAULT_RETRY can be returned, vmas become invalid */
1342 BUG_ON(vmas);
1343 /* check caller initialized locked */
1344 BUG_ON(*locked != 1);
1345 }
1346
a458b76a
AA
1347 if (flags & FOLL_PIN)
1348 mm_set_has_pinned_flag(&mm->flags);
008cfe44 1349
eddb1c22
JH
1350 /*
1351 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1352 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1353 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1354 * for FOLL_GET, not for the newer FOLL_PIN.
1355 *
1356 * FOLL_PIN always expects pages to be non-null, but no need to assert
1357 * that here, as any failures will be obvious enough.
1358 */
1359 if (pages && !(flags & FOLL_PIN))
f0818f47 1360 flags |= FOLL_GET;
f0818f47
AA
1361
1362 pages_done = 0;
1363 lock_dropped = false;
1364 for (;;) {
64019a2e 1365 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
f0818f47
AA
1366 vmas, locked);
1367 if (!locked)
1368 /* VM_FAULT_RETRY couldn't trigger, bypass */
1369 return ret;
1370
1371 /* VM_FAULT_RETRY cannot return errors */
1372 if (!*locked) {
1373 BUG_ON(ret < 0);
1374 BUG_ON(ret >= nr_pages);
1375 }
1376
f0818f47
AA
1377 if (ret > 0) {
1378 nr_pages -= ret;
1379 pages_done += ret;
1380 if (!nr_pages)
1381 break;
1382 }
1383 if (*locked) {
96312e61
AA
1384 /*
1385 * VM_FAULT_RETRY didn't trigger or it was a
1386 * FOLL_NOWAIT.
1387 */
f0818f47
AA
1388 if (!pages_done)
1389 pages_done = ret;
1390 break;
1391 }
df17277b
MR
1392 /*
1393 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1394 * For the prefault case (!pages) we only update counts.
1395 */
1396 if (likely(pages))
1397 pages += ret;
f0818f47 1398 start += ret << PAGE_SHIFT;
4426e945 1399 lock_dropped = true;
f0818f47 1400
4426e945 1401retry:
f0818f47
AA
1402 /*
1403 * Repeat on the address that fired VM_FAULT_RETRY
4426e945
PX
1404 * with both FAULT_FLAG_ALLOW_RETRY and
1405 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1406 * by fatal signals, so we need to check it before we
1407 * start trying again otherwise it can loop forever.
f0818f47 1408 */
4426e945 1409
ae46d2aa
HD
1410 if (fatal_signal_pending(current)) {
1411 if (!pages_done)
1412 pages_done = -EINTR;
4426e945 1413 break;
ae46d2aa 1414 }
4426e945 1415
d8ed45c5 1416 ret = mmap_read_lock_killable(mm);
71335f37
PX
1417 if (ret) {
1418 BUG_ON(ret > 0);
1419 if (!pages_done)
1420 pages_done = ret;
1421 break;
1422 }
4426e945 1423
c7b6a566 1424 *locked = 1;
64019a2e 1425 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
4426e945
PX
1426 pages, NULL, locked);
1427 if (!*locked) {
1428 /* Continue to retry until we succeeded */
1429 BUG_ON(ret != 0);
1430 goto retry;
1431 }
f0818f47
AA
1432 if (ret != 1) {
1433 BUG_ON(ret > 1);
1434 if (!pages_done)
1435 pages_done = ret;
1436 break;
1437 }
1438 nr_pages--;
1439 pages_done++;
1440 if (!nr_pages)
1441 break;
df17277b
MR
1442 if (likely(pages))
1443 pages++;
f0818f47
AA
1444 start += PAGE_SIZE;
1445 }
e716712f 1446 if (lock_dropped && *locked) {
f0818f47
AA
1447 /*
1448 * We must let the caller know we temporarily dropped the lock
1449 * and so the critical section protected by it was lost.
1450 */
d8ed45c5 1451 mmap_read_unlock(mm);
f0818f47
AA
1452 *locked = 0;
1453 }
1454 return pages_done;
1455}
1456
d3649f68
CH
1457/**
1458 * populate_vma_page_range() - populate a range of pages in the vma.
1459 * @vma: target vma
1460 * @start: start address
1461 * @end: end address
c1e8d7c6 1462 * @locked: whether the mmap_lock is still held
d3649f68
CH
1463 *
1464 * This takes care of mlocking the pages too if VM_LOCKED is set.
1465 *
0a36f7f8
TY
1466 * Return either number of pages pinned in the vma, or a negative error
1467 * code on error.
d3649f68 1468 *
c1e8d7c6 1469 * vma->vm_mm->mmap_lock must be held.
d3649f68 1470 *
4f6da934 1471 * If @locked is NULL, it may be held for read or write and will
d3649f68
CH
1472 * be unperturbed.
1473 *
4f6da934
PX
1474 * If @locked is non-NULL, it must be held for read only and may be
1475 * released. If it's released, *@locked will be set to 0.
d3649f68
CH
1476 */
1477long populate_vma_page_range(struct vm_area_struct *vma,
4f6da934 1478 unsigned long start, unsigned long end, int *locked)
d3649f68
CH
1479{
1480 struct mm_struct *mm = vma->vm_mm;
1481 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1482 int gup_flags;
ece369c7 1483 long ret;
d3649f68 1484
be51eb18
ML
1485 VM_BUG_ON(!PAGE_ALIGNED(start));
1486 VM_BUG_ON(!PAGE_ALIGNED(end));
d3649f68
CH
1487 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1488 VM_BUG_ON_VMA(end > vma->vm_end, vma);
42fc5414 1489 mmap_assert_locked(mm);
d3649f68 1490
b67bf49c
HD
1491 /*
1492 * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1493 * faultin_page() to break COW, so it has no work to do here.
1494 */
d3649f68 1495 if (vma->vm_flags & VM_LOCKONFAULT)
b67bf49c
HD
1496 return nr_pages;
1497
1498 gup_flags = FOLL_TOUCH;
d3649f68
CH
1499 /*
1500 * We want to touch writable mappings with a write fault in order
1501 * to break COW, except for shared mappings because these don't COW
1502 * and we would not want to dirty them for nothing.
1503 */
1504 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1505 gup_flags |= FOLL_WRITE;
1506
1507 /*
1508 * We want mlock to succeed for regions that have any permissions
1509 * other than PROT_NONE.
1510 */
3122e80e 1511 if (vma_is_accessible(vma))
d3649f68
CH
1512 gup_flags |= FOLL_FORCE;
1513
1514 /*
1515 * We made sure addr is within a VMA, so the following will
1516 * not result in a stack expansion that recurses back here.
1517 */
ece369c7 1518 ret = __get_user_pages(mm, start, nr_pages, gup_flags,
4f6da934 1519 NULL, NULL, locked);
ece369c7
HD
1520 lru_add_drain();
1521 return ret;
d3649f68
CH
1522}
1523
4ca9b385
DH
1524/*
1525 * faultin_vma_page_range() - populate (prefault) page tables inside the
1526 * given VMA range readable/writable
1527 *
1528 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1529 *
1530 * @vma: target vma
1531 * @start: start address
1532 * @end: end address
1533 * @write: whether to prefault readable or writable
1534 * @locked: whether the mmap_lock is still held
1535 *
1536 * Returns either number of processed pages in the vma, or a negative error
1537 * code on error (see __get_user_pages()).
1538 *
1539 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1540 * covered by the VMA.
1541 *
1542 * If @locked is NULL, it may be held for read or write and will be unperturbed.
1543 *
1544 * If @locked is non-NULL, it must be held for read only and may be released. If
1545 * it's released, *@locked will be set to 0.
1546 */
1547long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
1548 unsigned long end, bool write, int *locked)
1549{
1550 struct mm_struct *mm = vma->vm_mm;
1551 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1552 int gup_flags;
ece369c7 1553 long ret;
4ca9b385
DH
1554
1555 VM_BUG_ON(!PAGE_ALIGNED(start));
1556 VM_BUG_ON(!PAGE_ALIGNED(end));
1557 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1558 VM_BUG_ON_VMA(end > vma->vm_end, vma);
1559 mmap_assert_locked(mm);
1560
1561 /*
1562 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1563 * the page dirty with FOLL_WRITE -- which doesn't make a
1564 * difference with !FOLL_FORCE, because the page is writable
1565 * in the page table.
1566 * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1567 * a poisoned page.
4ca9b385
DH
1568 * !FOLL_FORCE: Require proper access permissions.
1569 */
b67bf49c 1570 gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
4ca9b385
DH
1571 if (write)
1572 gup_flags |= FOLL_WRITE;
1573
1574 /*
eb2faa51
DH
1575 * We want to report -EINVAL instead of -EFAULT for any permission
1576 * problems or incompatible mappings.
4ca9b385 1577 */
eb2faa51
DH
1578 if (check_vma_flags(vma, gup_flags))
1579 return -EINVAL;
1580
ece369c7 1581 ret = __get_user_pages(mm, start, nr_pages, gup_flags,
4ca9b385 1582 NULL, NULL, locked);
ece369c7
HD
1583 lru_add_drain();
1584 return ret;
4ca9b385
DH
1585}
1586
d3649f68
CH
1587/*
1588 * __mm_populate - populate and/or mlock pages within a range of address space.
1589 *
1590 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1591 * flags. VMAs must be already marked with the desired vm_flags, and
c1e8d7c6 1592 * mmap_lock must not be held.
d3649f68
CH
1593 */
1594int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1595{
1596 struct mm_struct *mm = current->mm;
1597 unsigned long end, nstart, nend;
1598 struct vm_area_struct *vma = NULL;
1599 int locked = 0;
1600 long ret = 0;
1601
1602 end = start + len;
1603
1604 for (nstart = start; nstart < end; nstart = nend) {
1605 /*
1606 * We want to fault in pages for [nstart; end) address range.
1607 * Find first corresponding VMA.
1608 */
1609 if (!locked) {
1610 locked = 1;
d8ed45c5 1611 mmap_read_lock(mm);
d3649f68
CH
1612 vma = find_vma(mm, nstart);
1613 } else if (nstart >= vma->vm_end)
1614 vma = vma->vm_next;
1615 if (!vma || vma->vm_start >= end)
1616 break;
1617 /*
1618 * Set [nstart; nend) to intersection of desired address
1619 * range with the first VMA. Also, skip undesirable VMA types.
1620 */
1621 nend = min(end, vma->vm_end);
1622 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1623 continue;
1624 if (nstart < vma->vm_start)
1625 nstart = vma->vm_start;
1626 /*
1627 * Now fault in a range of pages. populate_vma_page_range()
1628 * double checks the vma flags, so that it won't mlock pages
1629 * if the vma was already munlocked.
1630 */
1631 ret = populate_vma_page_range(vma, nstart, nend, &locked);
1632 if (ret < 0) {
1633 if (ignore_errors) {
1634 ret = 0;
1635 continue; /* continue at next VMA */
1636 }
1637 break;
1638 }
1639 nend = nstart + ret * PAGE_SIZE;
1640 ret = 0;
1641 }
1642 if (locked)
d8ed45c5 1643 mmap_read_unlock(mm);
d3649f68
CH
1644 return ret; /* 0 or negative error code */
1645}
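/*
 * Illustrative sketch, not part of gup.c: how an mlock()-style path is
 * expected to use __mm_populate() once the VMAs already carry the desired
 * vm_flags (e.g. VM_LOCKED). The wrapper name is hypothetical; the real
 * callers live in mm/mlock.c and in mm_populate() in <linux/mm.h>.
 */
static int populate_after_mlock_sketch(unsigned long start, unsigned long len)
{
	/*
	 * mmap_lock must not be held here; __mm_populate() takes and drops
	 * it as needed. ignore_errors == 0 means a failed fault aborts.
	 */
	return __mm_populate(start, len, 0);
}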
050a9adc 1646#else /* CONFIG_MMU */
64019a2e 1647static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
050a9adc
CH
1648 unsigned long nr_pages, struct page **pages,
1649 struct vm_area_struct **vmas, int *locked,
1650 unsigned int foll_flags)
1651{
1652 struct vm_area_struct *vma;
1653 unsigned long vm_flags;
24dc20c7 1654 long i;
050a9adc
CH
1655
1656 /* calculate required read or write permissions.
1657 * If FOLL_FORCE is set, we only require the "MAY" flags.
1658 */
1659 vm_flags = (foll_flags & FOLL_WRITE) ?
1660 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1661 vm_flags &= (foll_flags & FOLL_FORCE) ?
1662 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1663
1664 for (i = 0; i < nr_pages; i++) {
1665 vma = find_vma(mm, start);
1666 if (!vma)
1667 goto finish_or_fault;
1668
1669 /* protect what we can, including chardevs */
1670 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1671 !(vm_flags & vma->vm_flags))
1672 goto finish_or_fault;
1673
1674 if (pages) {
1675 pages[i] = virt_to_page(start);
1676 if (pages[i])
1677 get_page(pages[i]);
1678 }
1679 if (vmas)
1680 vmas[i] = vma;
1681 start = (start + PAGE_SIZE) & PAGE_MASK;
1682 }
1683
1684 return i;
1685
1686finish_or_fault:
1687 return i ? : -EFAULT;
1688}
1689#endif /* !CONFIG_MMU */
d3649f68 1690
bb523b40
AG
1691/**
1692 * fault_in_writeable - fault in userspace address range for writing
1693 * @uaddr: start of address range
1694 * @size: size of address range
1695 *
1696 * Returns the number of bytes not faulted in (like copy_to_user() and
1697 * copy_from_user()).
1698 */
1699size_t fault_in_writeable(char __user *uaddr, size_t size)
1700{
1701 char __user *start = uaddr, *end;
1702
1703 if (unlikely(size == 0))
1704 return 0;
677b2a8c
CL
1705 if (!user_write_access_begin(uaddr, size))
1706 return size;
bb523b40 1707 if (!PAGE_ALIGNED(uaddr)) {
677b2a8c 1708 unsafe_put_user(0, uaddr, out);
bb523b40
AG
1709 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1710 }
1711 end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1712 if (unlikely(end < start))
1713 end = NULL;
1714 while (uaddr != end) {
677b2a8c 1715 unsafe_put_user(0, uaddr, out);
bb523b40
AG
1716 uaddr += PAGE_SIZE;
1717 }
1718
1719out:
677b2a8c 1720 user_write_access_end();
bb523b40
AG
1721 if (size > uaddr - start)
1722 return size - (uaddr - start);
1723 return 0;
1724}
1725EXPORT_SYMBOL(fault_in_writeable);
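/*
 * Illustrative sketch, not part of gup.c: the usual "copy, fault in,
 * retry" pattern around copy_to_user() that callers use when the copy may
 * fail because the destination is not resident. The helper name is
 * hypothetical; copy_to_user() needs <linux/uaccess.h>.
 */
static int copy_out_with_prefault_sketch(char __user *ubuf,
					 const void *kbuf, size_t len)
{
	if (!copy_to_user(ubuf, kbuf, len))
		return 0;
	/* Destination not faulted in: make it writable, then retry once. */
	if (fault_in_writeable(ubuf, len))
		return -EFAULT;
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return 0;
}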
1726
cdd591fc
AG
1727/*
1728 * fault_in_safe_writeable - fault in an address range for writing
1729 * @uaddr: start of address range
1730 * @size: length of address range
1731 *
fe673d3f
LT
1732 * Faults in an address range for writing. This is primarily useful when we
1733 * already know that some or all of the pages in the address range aren't in
1734 * memory.
cdd591fc 1735 *
fe673d3f 1736 * Unlike fault_in_writeable(), this function is non-destructive.
cdd591fc
AG
1737 *
 1738 * Note that we don't pin or otherwise hold a reference on the pages we fault
 1739 * in. There's no guarantee that they'll stay in memory for any duration of
1740 * time.
1741 *
1742 * Returns the number of bytes not faulted in, like copy_to_user() and
1743 * copy_from_user().
1744 */
1745size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1746{
fe673d3f 1747 unsigned long start = (unsigned long)uaddr, end;
cdd591fc 1748 struct mm_struct *mm = current->mm;
fe673d3f 1749 bool unlocked = false;
cdd591fc 1750
fe673d3f
LT
1751 if (unlikely(size == 0))
1752 return 0;
cdd591fc 1753 end = PAGE_ALIGN(start + size);
fe673d3f 1754 if (end < start)
cdd591fc 1755 end = 0;
cdd591fc 1756
fe673d3f
LT
1757 mmap_read_lock(mm);
1758 do {
1759 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
cdd591fc 1760 break;
fe673d3f
LT
1761 start = (start + PAGE_SIZE) & PAGE_MASK;
1762 } while (start != end);
1763 mmap_read_unlock(mm);
1764
1765 if (size > (unsigned long)uaddr - start)
1766 return size - ((unsigned long)uaddr - start);
1767 return 0;
cdd591fc
AG
1768}
1769EXPORT_SYMBOL(fault_in_safe_writeable);
1770
bb523b40
AG
1771/**
1772 * fault_in_readable - fault in userspace address range for reading
1773 * @uaddr: start of user address range
1774 * @size: size of user address range
1775 *
1776 * Returns the number of bytes not faulted in (like copy_to_user() and
1777 * copy_from_user()).
1778 */
1779size_t fault_in_readable(const char __user *uaddr, size_t size)
1780{
1781 const char __user *start = uaddr, *end;
1782 volatile char c;
1783
1784 if (unlikely(size == 0))
1785 return 0;
677b2a8c
CL
1786 if (!user_read_access_begin(uaddr, size))
1787 return size;
bb523b40 1788 if (!PAGE_ALIGNED(uaddr)) {
677b2a8c 1789 unsafe_get_user(c, uaddr, out);
bb523b40
AG
1790 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1791 }
1792 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1793 if (unlikely(end < start))
1794 end = NULL;
1795 while (uaddr != end) {
677b2a8c 1796 unsafe_get_user(c, uaddr, out);
bb523b40
AG
1797 uaddr += PAGE_SIZE;
1798 }
1799
1800out:
677b2a8c 1801 user_read_access_end();
bb523b40
AG
1802 (void)c;
1803 if (size > uaddr - start)
1804 return size - (uaddr - start);
1805 return 0;
1806}
1807EXPORT_SYMBOL(fault_in_readable);
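/*
 * Illustrative sketch, not part of gup.c: prefaulting a user source
 * buffer before copying from it, as buffered-write paths do to avoid
 * retrying a copy under unfavourable locking. The helper name is
 * hypothetical.
 */
static int copy_in_with_prefault_sketch(void *kbuf,
					const char __user *ubuf, size_t len)
{
	/* Best effort: bail out only if not a single byte could be faulted in. */
	if (fault_in_readable(ubuf, len) == len)
		return -EFAULT;

	/* The pages may disappear again; copy_from_user() handles that. */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}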
1808
8f942eea
JH
1809/**
1810 * get_dump_page() - pin user page in memory while writing it to core dump
1811 * @addr: user address
1812 *
1813 * Returns struct page pointer of user page pinned for dump,
1814 * to be freed afterwards by put_page().
1815 *
1816 * Returns NULL on any kind of failure - a hole must then be inserted into
1817 * the corefile, to preserve alignment with its headers; and also returns
1818 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
f0953a1b 1819 * allowing a hole to be left in the corefile to save disk space.
8f942eea 1820 *
7f3bfab5 1821 * Called without mmap_lock (takes and releases the mmap_lock by itself).
8f942eea
JH
1822 */
1823#ifdef CONFIG_ELF_CORE
1824struct page *get_dump_page(unsigned long addr)
1825{
7f3bfab5 1826 struct mm_struct *mm = current->mm;
8f942eea 1827 struct page *page;
7f3bfab5
JH
1828 int locked = 1;
1829 int ret;
8f942eea 1830
7f3bfab5 1831 if (mmap_read_lock_killable(mm))
8f942eea 1832 return NULL;
7f3bfab5
JH
1833 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1834 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1835 if (locked)
1836 mmap_read_unlock(mm);
1837 return (ret == 1) ? page : NULL;
8f942eea
JH
1838}
1839#endif /* CONFIG_ELF_CORE */
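/*
 * Illustrative sketch, not part of gup.c: roughly how the ELF coredump
 * loop in fs/binfmt_elf.c consumes get_dump_page(). emit_core_bytes() and
 * emit_core_hole() are hypothetical stand-ins for the real
 * dump_emit()/dump_skip() helpers in fs/coredump.c.
 */
static bool emit_core_bytes(const void *buf, size_t len);	/* hypothetical */
static bool emit_core_hole(size_t len);				/* hypothetical */

static int dump_user_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);
		void *kaddr;
		bool ok;

		if (!page) {
			/* Hole: keeps file offsets aligned with the headers. */
			if (!emit_core_hole(PAGE_SIZE))
				return -EIO;
			continue;
		}
		/* The page is held; copy it out via a temporary kernel mapping. */
		kaddr = kmap_local_page(page);
		ok = emit_core_bytes(kaddr, PAGE_SIZE);
		kunmap_local(kaddr);
		put_page(page);
		if (!ok)
			return -EIO;
	}
	return 0;
}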
1840
d1e153fe 1841#ifdef CONFIG_MIGRATION
f68749ec
PT
1842/*
 1843 * Check whether all pages are pinnable; if so, return the number of pages. If some
1844 * pages are not pinnable, migrate them, and unpin all pages. Return zero if
1845 * pages were migrated, or if some pages were not successfully isolated.
1846 * Return negative error if migration fails.
1847 */
1848static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1849 struct page **pages,
d1e153fe 1850 unsigned int gup_flags)
9a4e9f3b 1851{
f9f38f78 1852 unsigned long isolation_error_count = 0, i;
1b7f7e58 1853 struct folio *prev_folio = NULL;
d1e153fe 1854 LIST_HEAD(movable_page_list);
f9f38f78
CH
1855 bool drain_allow = true;
1856 int ret = 0;
9a4e9f3b 1857
83c02c23 1858 for (i = 0; i < nr_pages; i++) {
1b7f7e58 1859 struct folio *folio = page_folio(pages[i]);
f9f38f78 1860
1b7f7e58 1861 if (folio == prev_folio)
83c02c23 1862 continue;
1b7f7e58 1863 prev_folio = folio;
f9f38f78 1864
1b7f7e58 1865 if (folio_is_pinnable(folio))
f9f38f78
CH
1866 continue;
1867
9a4e9f3b 1868 /*
f9f38f78 1869 * Try to move out any movable page before pinning the range.
9a4e9f3b 1870 */
1b7f7e58
MWO
1871 if (folio_test_hugetlb(folio)) {
1872 if (!isolate_huge_page(&folio->page,
1873 &movable_page_list))
f9f38f78
CH
1874 isolation_error_count++;
1875 continue;
1876 }
9a4e9f3b 1877
1b7f7e58 1878 if (!folio_test_lru(folio) && drain_allow) {
f9f38f78
CH
1879 lru_add_drain_all();
1880 drain_allow = false;
1881 }
1882
1b7f7e58 1883 if (folio_isolate_lru(folio)) {
f9f38f78
CH
1884 isolation_error_count++;
1885 continue;
9a4e9f3b 1886 }
1b7f7e58
MWO
1887 list_add_tail(&folio->lru, &movable_page_list);
1888 node_stat_mod_folio(folio,
1889 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1890 folio_nr_pages(folio));
9a4e9f3b
AK
1891 }
1892
f9f38f78
CH
1893 if (!list_empty(&movable_page_list) || isolation_error_count)
1894 goto unpin_pages;
1895
6e7f34eb
PT
1896 /*
 1897 * If the list is empty and there were no isolation errors, all pages are
1898 * in the correct zone.
1899 */
f9f38f78 1900 return nr_pages;
6e7f34eb 1901
f9f38f78 1902unpin_pages:
f68749ec
PT
1903 if (gup_flags & FOLL_PIN) {
1904 unpin_user_pages(pages, nr_pages);
1905 } else {
1906 for (i = 0; i < nr_pages; i++)
1907 put_page(pages[i]);
1908 }
f9f38f78 1909
d1e153fe 1910 if (!list_empty(&movable_page_list)) {
f9f38f78
CH
1911 struct migration_target_control mtc = {
1912 .nid = NUMA_NO_NODE,
1913 .gfp_mask = GFP_USER | __GFP_NOWARN,
1914 };
1915
d1e153fe 1916 ret = migrate_pages(&movable_page_list, alloc_migration_target,
f0f44638 1917 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
5ac95884 1918 MR_LONGTERM_PIN, NULL);
f9f38f78
CH
1919 if (ret > 0) /* number of pages not migrated */
1920 ret = -ENOMEM;
9a4e9f3b
AK
1921 }
1922
f9f38f78
CH
1923 if (ret && !list_empty(&movable_page_list))
1924 putback_movable_pages(&movable_page_list);
1925 return ret;
9a4e9f3b
AK
1926}
1927#else
f68749ec 1928static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1929 struct page **pages,
d1e153fe 1930 unsigned int gup_flags)
9a4e9f3b
AK
1931{
1932 return nr_pages;
1933}
d1e153fe 1934#endif /* CONFIG_MIGRATION */
9a4e9f3b 1935
2bb6d283 1936/*
932f4a63
IW
1937 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1938 * allows us to process the FOLL_LONGTERM flag.
2bb6d283 1939 */
64019a2e 1940static long __gup_longterm_locked(struct mm_struct *mm,
932f4a63
IW
1941 unsigned long start,
1942 unsigned long nr_pages,
1943 struct page **pages,
1944 struct vm_area_struct **vmas,
1945 unsigned int gup_flags)
2bb6d283 1946{
f68749ec 1947 unsigned int flags;
52650c8b 1948 long rc;
2bb6d283 1949
f68749ec
PT
1950 if (!(gup_flags & FOLL_LONGTERM))
1951 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1952 NULL, gup_flags);
1953 flags = memalloc_pin_save();
1954 do {
1955 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1956 NULL, gup_flags);
1957 if (rc <= 0)
1958 break;
1959 rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1960 } while (!rc);
1961 memalloc_pin_restore(flags);
2bb6d283 1962
2bb6d283
DW
1963 return rc;
1964}
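/*
 * Illustrative sketch, not part of gup.c: a driver-style long-term pin of
 * a user buffer (RDMA/VFIO-like). FOLL_LONGTERM routes the request through
 * __gup_longterm_locked() above, which migrates unpinnable (CMA /
 * ZONE_MOVABLE) pages before the pin sticks. Names are hypothetical.
 */
static long longterm_pin_buffer_sketch(unsigned long uaddr,
				       unsigned long nr_pages,
				       struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages(uaddr, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	mmap_read_unlock(current->mm);

	/* Balance with unpin_user_pages(pages, pinned) when the DMA is done. */
	return pinned;
}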
932f4a63 1965
447f3e45
BS
1966static bool is_valid_gup_flags(unsigned int gup_flags)
1967{
1968 /*
1969 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1970 * never directly by the caller, so enforce that with an assertion:
1971 */
1972 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1973 return false;
1974 /*
1975 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
 1976 * that is, FOLL_LONGTERM is a specific, more restrictive case of
1977 * FOLL_PIN.
1978 */
1979 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1980 return false;
1981
1982 return true;
1983}
1984
22bf29b6 1985#ifdef CONFIG_MMU
64019a2e 1986static long __get_user_pages_remote(struct mm_struct *mm,
22bf29b6
JH
1987 unsigned long start, unsigned long nr_pages,
1988 unsigned int gup_flags, struct page **pages,
1989 struct vm_area_struct **vmas, int *locked)
1990{
1991 /*
1992 * Parts of FOLL_LONGTERM behavior are incompatible with
1993 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1994 * vmas. However, this only comes up if locked is set, and there are
1995 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1996 * allow what we can.
1997 */
1998 if (gup_flags & FOLL_LONGTERM) {
1999 if (WARN_ON_ONCE(locked))
2000 return -EINVAL;
2001 /*
2002 * This will check the vmas (even if our vmas arg is NULL)
2003 * and return -ENOTSUPP if DAX isn't allowed in this case:
2004 */
64019a2e 2005 return __gup_longterm_locked(mm, start, nr_pages, pages,
22bf29b6
JH
2006 vmas, gup_flags | FOLL_TOUCH |
2007 FOLL_REMOTE);
2008 }
2009
64019a2e 2010 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
22bf29b6
JH
2011 locked,
2012 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
2013}
2014
adc8cb40 2015/**
c4237f8b 2016 * get_user_pages_remote() - pin user pages in memory
c4237f8b
JH
2017 * @mm: mm_struct of target mm
2018 * @start: starting user address
2019 * @nr_pages: number of pages from start to pin
2020 * @gup_flags: flags modifying lookup behaviour
2021 * @pages: array that receives pointers to the pages pinned.
2022 * Should be at least nr_pages long. Or NULL, if caller
2023 * only intends to ensure the pages are faulted in.
2024 * @vmas: array of pointers to vmas corresponding to each page.
2025 * Or NULL if the caller does not require them.
2026 * @locked: pointer to lock flag indicating whether lock is held and
2027 * subsequently whether VM_FAULT_RETRY functionality can be
2028 * utilised. Lock must initially be held.
2029 *
2030 * Returns either number of pages pinned (which may be less than the
2031 * number requested), or an error. Details about the return value:
2032 *
2033 * -- If nr_pages is 0, returns 0.
2034 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2035 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2036 * pages pinned. Again, this may be less than nr_pages.
2037 *
2038 * The caller is responsible for releasing returned @pages, via put_page().
2039 *
c1e8d7c6 2040 * @vmas are valid only as long as mmap_lock is held.
c4237f8b 2041 *
c1e8d7c6 2042 * Must be called with mmap_lock held for read or write.
c4237f8b 2043 *
adc8cb40
SJ
2044 * get_user_pages_remote walks a process's page tables and takes a reference
2045 * to each struct page that each user address corresponds to at a given
c4237f8b
JH
2046 * instant. That is, it takes the page that would be accessed if a user
2047 * thread accesses the given user virtual address at that instant.
2048 *
2049 * This does not guarantee that the page exists in the user mappings when
adc8cb40 2050 * get_user_pages_remote returns, and there may even be a completely different
c4237f8b
JH
 2051 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 2052 * and subsequently re-faulted). However, it does guarantee that the page
2053 * won't be freed completely. And mostly callers simply care that the page
2054 * contains data that was valid *at some point in time*. Typically, an IO
2055 * or similar operation cannot guarantee anything stronger anyway because
2056 * locks can't be held over the syscall boundary.
2057 *
2058 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2059 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2060 * be called after the page is finished with, and before put_page is called.
2061 *
adc8cb40
SJ
2062 * get_user_pages_remote is typically used for fewer-copy IO operations,
2063 * to get a handle on the memory by some means other than accesses
2064 * via the user virtual addresses. The pages may be submitted for
2065 * DMA to devices or accessed via their kernel linear mapping (via the
2066 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
c4237f8b
JH
2067 *
2068 * See also get_user_pages_fast, for performance critical applications.
2069 *
adc8cb40 2070 * get_user_pages_remote should be phased out in favor of
c4237f8b 2071 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
adc8cb40 2072 * should use get_user_pages_remote because it cannot pass
c4237f8b
JH
2073 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2074 */
64019a2e 2075long get_user_pages_remote(struct mm_struct *mm,
c4237f8b
JH
2076 unsigned long start, unsigned long nr_pages,
2077 unsigned int gup_flags, struct page **pages,
2078 struct vm_area_struct **vmas, int *locked)
2079{
447f3e45 2080 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2081 return -EINVAL;
2082
64019a2e 2083 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
22bf29b6 2084 pages, vmas, locked);
c4237f8b
JH
2085}
2086EXPORT_SYMBOL(get_user_pages_remote);
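/*
 * Illustrative sketch, not part of gup.c: reading part of another
 * process's memory, ptrace/access_remote_vm-style, via
 * get_user_pages_remote(). Assumes [addr, addr + len) stays within one
 * page; the helper name is hypothetical.
 */
static int read_remote_bytes_sketch(struct mm_struct *mm, unsigned long addr,
				    void *buf, size_t len)
{
	struct page *page;
	void *kaddr;
	int locked = 1;
	long got;

	if (mmap_read_lock_killable(mm))
		return -EINTR;
	got = get_user_pages_remote(mm, addr & PAGE_MASK, 1, FOLL_FORCE,
				    &page, NULL, &locked);
	if (locked)
		mmap_read_unlock(mm);
	if (got != 1)
		return got < 0 ? (int)got : -EFAULT;

	/* Copy out of the referenced page via a temporary kernel mapping. */
	kaddr = kmap_local_page(page);
	memcpy(buf, kaddr + offset_in_page(addr), len);
	kunmap_local(kaddr);
	put_page(page);
	return 0;
}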
2087
eddb1c22 2088#else /* CONFIG_MMU */
64019a2e 2089long get_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
2090 unsigned long start, unsigned long nr_pages,
2091 unsigned int gup_flags, struct page **pages,
2092 struct vm_area_struct **vmas, int *locked)
2093{
2094 return 0;
2095}
3faa52c0 2096
64019a2e 2097static long __get_user_pages_remote(struct mm_struct *mm,
3faa52c0
JH
2098 unsigned long start, unsigned long nr_pages,
2099 unsigned int gup_flags, struct page **pages,
2100 struct vm_area_struct **vmas, int *locked)
2101{
2102 return 0;
2103}
eddb1c22
JH
2104#endif /* !CONFIG_MMU */
2105
adc8cb40
SJ
2106/**
2107 * get_user_pages() - pin user pages in memory
2108 * @start: starting user address
2109 * @nr_pages: number of pages from start to pin
2110 * @gup_flags: flags modifying lookup behaviour
2111 * @pages: array that receives pointers to the pages pinned.
2112 * Should be at least nr_pages long. Or NULL, if caller
2113 * only intends to ensure the pages are faulted in.
2114 * @vmas: array of pointers to vmas corresponding to each page.
2115 * Or NULL if the caller does not require them.
2116 *
64019a2e
PX
2117 * This is the same as get_user_pages_remote(), just with a less-flexible
2118 * calling convention where we assume that the mm being operated on belongs to
2119 * the current task, and doesn't allow passing of a locked parameter. We also
2120 * obviously don't pass FOLL_REMOTE in here.
932f4a63
IW
2121 */
2122long get_user_pages(unsigned long start, unsigned long nr_pages,
2123 unsigned int gup_flags, struct page **pages,
2124 struct vm_area_struct **vmas)
2125{
447f3e45 2126 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2127 return -EINVAL;
2128
64019a2e 2129 return __gup_longterm_locked(current->mm, start, nr_pages,
932f4a63
IW
2130 pages, vmas, gup_flags | FOLL_TOUCH);
2131}
2132EXPORT_SYMBOL(get_user_pages);
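/*
 * Illustrative sketch, not part of gup.c: the classic get/modify/dirty/put
 * sequence described in the comments above. The page is written through
 * its kernel mapping, so it must be marked dirty before the reference is
 * dropped. Names are hypothetical; new pinning users should normally
 * prefer pin_user_pages() instead.
 */
static long zero_user_pages_sketch(unsigned long uaddr, unsigned long nr,
				   struct page **pages)
{
	long i, got;

	mmap_read_lock(current->mm);
	got = get_user_pages(uaddr, nr, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	for (i = 0; i < got; i++) {
		void *kaddr = kmap_local_page(pages[i]);

		memset(kaddr, 0, PAGE_SIZE);	/* write into the user page */
		kunmap_local(kaddr);
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return got;		/* number of pages processed, or -errno */
}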
2bb6d283 2133
acc3c8d1 2134/*
d3649f68 2135 * get_user_pages_unlocked() is suitable to replace the form:
acc3c8d1 2136 *
3e4e28c5 2137 * mmap_read_lock(mm);
64019a2e 2138 * get_user_pages(mm, ..., pages, NULL);
3e4e28c5 2139 * mmap_read_unlock(mm);
d3649f68
CH
2140 *
2141 * with:
2142 *
64019a2e 2143 * get_user_pages_unlocked(mm, ..., pages);
d3649f68
CH
2144 *
2145 * It is functionally equivalent to get_user_pages_fast so
2146 * get_user_pages_fast should be used instead if specific gup_flags
2147 * (e.g. FOLL_FORCE) are not required.
acc3c8d1 2148 */
d3649f68
CH
2149long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2150 struct page **pages, unsigned int gup_flags)
acc3c8d1
KS
2151{
2152 struct mm_struct *mm = current->mm;
d3649f68
CH
2153 int locked = 1;
2154 long ret;
acc3c8d1 2155
d3649f68
CH
2156 /*
2157 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2158 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2159 * vmas. As there are no users of this flag in this call we simply
2160 * disallow this option for now.
2161 */
2162 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2163 return -EINVAL;
acc3c8d1 2164
d8ed45c5 2165 mmap_read_lock(mm);
64019a2e 2166 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
d3649f68 2167 &locked, gup_flags | FOLL_TOUCH);
acc3c8d1 2168 if (locked)
d8ed45c5 2169 mmap_read_unlock(mm);
d3649f68 2170 return ret;
4bbd4c77 2171}
d3649f68 2172EXPORT_SYMBOL(get_user_pages_unlocked);
2667f50e
SC
2173
2174/*
67a929e0 2175 * Fast GUP
2667f50e
SC
2176 *
2177 * get_user_pages_fast attempts to pin user pages by walking the page
2178 * tables directly and avoids taking locks. Thus the walker needs to be
2179 * protected from page table pages being freed from under it, and should
2180 * block any THP splits.
2181 *
2182 * One way to achieve this is to have the walker disable interrupts, and
2183 * rely on IPIs from the TLB flushing code blocking before the page table
2184 * pages are freed. This is unsuitable for architectures that do not need
2185 * to broadcast an IPI when invalidating TLBs.
2186 *
 2187 * Another way to achieve this is to batch up the page-table-containing pages
 2188 * belonging to more than one mm_user, then rcu_sched a callback to free those
2189 * pages. Disabling interrupts will allow the fast_gup walker to both block
2190 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2191 * (which is a relatively rare event). The code below adopts this strategy.
2192 *
2193 * Before activating this code, please be aware that the following assumptions
2194 * are currently made:
2195 *
ff2e6d72 2196 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
e585513b 2197 * free pages containing page tables or TLB flushing requires IPI broadcast.
2667f50e 2198 *
2667f50e
SC
2199 * *) ptes can be read atomically by the architecture.
2200 *
2201 * *) access_ok is sufficient to validate userspace address ranges.
2202 *
2203 * The last two assumptions can be relaxed by the addition of helper functions.
2204 *
2205 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2206 */
67a929e0 2207#ifdef CONFIG_HAVE_FAST_GUP
3faa52c0 2208
790c7369 2209static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
3b78d834 2210 unsigned int flags,
790c7369 2211 struct page **pages)
b59f65fa
KS
2212{
2213 while ((*nr) - nr_start) {
2214 struct page *page = pages[--(*nr)];
2215
2216 ClearPageReferenced(page);
3faa52c0
JH
2217 if (flags & FOLL_PIN)
2218 unpin_user_page(page);
2219 else
2220 put_page(page);
b59f65fa
KS
2221 }
2222}
2223
3010a5ea 2224#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2667f50e 2225static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2226 unsigned int flags, struct page **pages, int *nr)
2667f50e 2227{
b59f65fa
KS
2228 struct dev_pagemap *pgmap = NULL;
2229 int nr_start = *nr, ret = 0;
2667f50e 2230 pte_t *ptep, *ptem;
2667f50e
SC
2231
2232 ptem = ptep = pte_offset_map(&pmd, addr);
2233 do {
2a4a06da 2234 pte_t pte = ptep_get_lockless(ptep);
b0496fe4
MWO
2235 struct page *page;
2236 struct folio *folio;
2667f50e
SC
2237
2238 /*
2239 * Similar to the PMD case below, NUMA hinting must take slow
8a0516ed 2240 * path using the pte_protnone check.
2667f50e 2241 */
e7884f8e
KS
2242 if (pte_protnone(pte))
2243 goto pte_unmap;
2244
b798bec4 2245 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
e7884f8e
KS
2246 goto pte_unmap;
2247
b59f65fa 2248 if (pte_devmap(pte)) {
7af75561
IW
2249 if (unlikely(flags & FOLL_LONGTERM))
2250 goto pte_unmap;
2251
b59f65fa
KS
2252 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2253 if (unlikely(!pgmap)) {
3b78d834 2254 undo_dev_pagemap(nr, nr_start, flags, pages);
b59f65fa
KS
2255 goto pte_unmap;
2256 }
2257 } else if (pte_special(pte))
2667f50e
SC
2258 goto pte_unmap;
2259
2260 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2261 page = pte_page(pte);
2262
b0496fe4
MWO
2263 folio = try_grab_folio(page, 1, flags);
2264 if (!folio)
2667f50e
SC
2265 goto pte_unmap;
2266
1507f512 2267 if (unlikely(page_is_secretmem(page))) {
b0496fe4 2268 gup_put_folio(folio, 1, flags);
1507f512
MR
2269 goto pte_unmap;
2270 }
2271
2667f50e 2272 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
b0496fe4 2273 gup_put_folio(folio, 1, flags);
2667f50e
SC
2274 goto pte_unmap;
2275 }
2276
a7f22660
DH
2277 if (!pte_write(pte) && gup_must_unshare(flags, page)) {
2278 gup_put_folio(folio, 1, flags);
2279 goto pte_unmap;
2280 }
2281
f28d4363
CI
2282 /*
2283 * We need to make the page accessible if and only if we are
2284 * going to access its content (the FOLL_PIN case). Please
2285 * see Documentation/core-api/pin_user_pages.rst for
2286 * details.
2287 */
2288 if (flags & FOLL_PIN) {
2289 ret = arch_make_page_accessible(page);
2290 if (ret) {
b0496fe4 2291 gup_put_folio(folio, 1, flags);
f28d4363
CI
2292 goto pte_unmap;
2293 }
2294 }
b0496fe4 2295 folio_set_referenced(folio);
2667f50e
SC
2296 pages[*nr] = page;
2297 (*nr)++;
2667f50e
SC
2298 } while (ptep++, addr += PAGE_SIZE, addr != end);
2299
2300 ret = 1;
2301
2302pte_unmap:
832d7aa0
CH
2303 if (pgmap)
2304 put_dev_pagemap(pgmap);
2667f50e
SC
2305 pte_unmap(ptem);
2306 return ret;
2307}
2308#else
2309
2310/*
2311 * If we can't determine whether or not a pte is special, then fail immediately
2312 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2313 * to be special.
2314 *
2315 * For a futex to be placed on a THP tail page, get_futex_key requires a
dadbb612 2316 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2667f50e
SC
2317 * useful to have gup_huge_pmd even if we can't operate on ptes.
2318 */
2319static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2320 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2321{
2322 return 0;
2323}
3010a5ea 2324#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2667f50e 2325
17596731 2326#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
b59f65fa 2327static int __gup_device_huge(unsigned long pfn, unsigned long addr,
86dfbed4
JH
2328 unsigned long end, unsigned int flags,
2329 struct page **pages, int *nr)
b59f65fa
KS
2330{
2331 int nr_start = *nr;
2332 struct dev_pagemap *pgmap = NULL;
2333
2334 do {
2335 struct page *page = pfn_to_page(pfn);
2336
2337 pgmap = get_dev_pagemap(pfn, pgmap);
2338 if (unlikely(!pgmap)) {
3b78d834 2339 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb 2340 break;
b59f65fa
KS
2341 }
2342 SetPageReferenced(page);
2343 pages[*nr] = page;
3faa52c0
JH
2344 if (unlikely(!try_grab_page(page, flags))) {
2345 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb 2346 break;
3faa52c0 2347 }
b59f65fa
KS
2348 (*nr)++;
2349 pfn++;
2350 } while (addr += PAGE_SIZE, addr != end);
832d7aa0 2351
6401c4eb 2352 put_dev_pagemap(pgmap);
20b7fee7 2353 return addr == end;
b59f65fa
KS
2354}
2355
a9b6de77 2356static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2357 unsigned long end, unsigned int flags,
2358 struct page **pages, int *nr)
b59f65fa
KS
2359{
2360 unsigned long fault_pfn;
a9b6de77
DW
2361 int nr_start = *nr;
2362
2363 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
86dfbed4 2364 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2365 return 0;
b59f65fa 2366
a9b6de77 2367 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2368 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2369 return 0;
2370 }
2371 return 1;
b59f65fa
KS
2372}
2373
a9b6de77 2374static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2375 unsigned long end, unsigned int flags,
2376 struct page **pages, int *nr)
b59f65fa
KS
2377{
2378 unsigned long fault_pfn;
a9b6de77
DW
2379 int nr_start = *nr;
2380
2381 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
86dfbed4 2382 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2383 return 0;
b59f65fa 2384
a9b6de77 2385 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2386 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2387 return 0;
2388 }
2389 return 1;
b59f65fa
KS
2390}
2391#else
a9b6de77 2392static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2393 unsigned long end, unsigned int flags,
2394 struct page **pages, int *nr)
b59f65fa
KS
2395{
2396 BUILD_BUG();
2397 return 0;
2398}
2399
a9b6de77 2400static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2401 unsigned long end, unsigned int flags,
2402 struct page **pages, int *nr)
b59f65fa
KS
2403{
2404 BUILD_BUG();
2405 return 0;
2406}
2407#endif
2408
a43e9820
JH
2409static int record_subpages(struct page *page, unsigned long addr,
2410 unsigned long end, struct page **pages)
2411{
2412 int nr;
2413
c228afb1
MWO
2414 for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
2415 pages[nr] = nth_page(page, nr);
a43e9820
JH
2416
2417 return nr;
2418}
2419
cbd34da7
CH
2420#ifdef CONFIG_ARCH_HAS_HUGEPD
2421static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2422 unsigned long sz)
2423{
2424 unsigned long __boundary = (addr + sz) & ~(sz-1);
2425 return (__boundary - 1 < end - 1) ? __boundary : end;
2426}
2427
2428static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
0cd22afd
JH
2429 unsigned long end, unsigned int flags,
2430 struct page **pages, int *nr)
cbd34da7
CH
2431{
2432 unsigned long pte_end;
09a1626e
MWO
2433 struct page *page;
2434 struct folio *folio;
cbd34da7
CH
2435 pte_t pte;
2436 int refs;
2437
2438 pte_end = (addr + sz) & ~(sz-1);
2439 if (pte_end < end)
2440 end = pte_end;
2441
55ca2263 2442 pte = huge_ptep_get(ptep);
cbd34da7 2443
0cd22afd 2444 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
cbd34da7
CH
2445 return 0;
2446
2447 /* hugepages are never "special" */
2448 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2449
09a1626e 2450 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
a43e9820 2451 refs = record_subpages(page, addr, end, pages + *nr);
cbd34da7 2452
09a1626e
MWO
2453 folio = try_grab_folio(page, refs, flags);
2454 if (!folio)
cbd34da7 2455 return 0;
cbd34da7
CH
2456
2457 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
09a1626e 2458 gup_put_folio(folio, refs, flags);
cbd34da7
CH
2459 return 0;
2460 }
2461
a7f22660
DH
2462 if (!pte_write(pte) && gup_must_unshare(flags, &folio->page)) {
2463 gup_put_folio(folio, refs, flags);
2464 return 0;
2465 }
2466
a43e9820 2467 *nr += refs;
09a1626e 2468 folio_set_referenced(folio);
cbd34da7
CH
2469 return 1;
2470}
2471
2472static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2473 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2474 struct page **pages, int *nr)
2475{
2476 pte_t *ptep;
2477 unsigned long sz = 1UL << hugepd_shift(hugepd);
2478 unsigned long next;
2479
2480 ptep = hugepte_offset(hugepd, addr, pdshift);
2481 do {
2482 next = hugepte_addr_end(addr, end, sz);
0cd22afd 2483 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
cbd34da7
CH
2484 return 0;
2485 } while (ptep++, addr = next, addr != end);
2486
2487 return 1;
2488}
2489#else
2490static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2491 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2492 struct page **pages, int *nr)
2493{
2494 return 0;
2495}
2496#endif /* CONFIG_ARCH_HAS_HUGEPD */
2497
2667f50e 2498static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
0cd22afd
JH
2499 unsigned long end, unsigned int flags,
2500 struct page **pages, int *nr)
2667f50e 2501{
667ed1f7
MWO
2502 struct page *page;
2503 struct folio *folio;
2667f50e
SC
2504 int refs;
2505
b798bec4 2506 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2507 return 0;
2508
7af75561
IW
2509 if (pmd_devmap(orig)) {
2510 if (unlikely(flags & FOLL_LONGTERM))
2511 return 0;
86dfbed4
JH
2512 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2513 pages, nr);
7af75561 2514 }
b59f65fa 2515
c228afb1 2516 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
a43e9820 2517 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2518
667ed1f7
MWO
2519 folio = try_grab_folio(page, refs, flags);
2520 if (!folio)
2667f50e 2521 return 0;
2667f50e
SC
2522
2523 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
667ed1f7 2524 gup_put_folio(folio, refs, flags);
2667f50e
SC
2525 return 0;
2526 }
2527
a7f22660
DH
2528 if (!pmd_write(orig) && gup_must_unshare(flags, &folio->page)) {
2529 gup_put_folio(folio, refs, flags);
2530 return 0;
2531 }
2532
a43e9820 2533 *nr += refs;
667ed1f7 2534 folio_set_referenced(folio);
2667f50e
SC
2535 return 1;
2536}
2537
2538static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2539 unsigned long end, unsigned int flags,
2540 struct page **pages, int *nr)
2667f50e 2541{
83afb52e
MWO
2542 struct page *page;
2543 struct folio *folio;
2667f50e
SC
2544 int refs;
2545
b798bec4 2546 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2547 return 0;
2548
7af75561
IW
2549 if (pud_devmap(orig)) {
2550 if (unlikely(flags & FOLL_LONGTERM))
2551 return 0;
86dfbed4
JH
2552 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2553 pages, nr);
7af75561 2554 }
b59f65fa 2555
c228afb1 2556 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
a43e9820 2557 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2558
83afb52e
MWO
2559 folio = try_grab_folio(page, refs, flags);
2560 if (!folio)
2667f50e 2561 return 0;
2667f50e
SC
2562
2563 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
83afb52e 2564 gup_put_folio(folio, refs, flags);
2667f50e
SC
2565 return 0;
2566 }
2567
a7f22660
DH
2568 if (!pud_write(orig) && gup_must_unshare(flags, &folio->page)) {
2569 gup_put_folio(folio, refs, flags);
2570 return 0;
2571 }
2572
a43e9820 2573 *nr += refs;
83afb52e 2574 folio_set_referenced(folio);
2667f50e
SC
2575 return 1;
2576}
2577
f30c59e9 2578static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
b798bec4 2579 unsigned long end, unsigned int flags,
f30c59e9
AK
2580 struct page **pages, int *nr)
2581{
2582 int refs;
2d7919a2
MWO
2583 struct page *page;
2584 struct folio *folio;
f30c59e9 2585
b798bec4 2586 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
f30c59e9
AK
2587 return 0;
2588
b59f65fa 2589 BUILD_BUG_ON(pgd_devmap(orig));
a43e9820 2590
c228afb1 2591 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
a43e9820 2592 refs = record_subpages(page, addr, end, pages + *nr);
f30c59e9 2593
2d7919a2
MWO
2594 folio = try_grab_folio(page, refs, flags);
2595 if (!folio)
f30c59e9 2596 return 0;
f30c59e9
AK
2597
2598 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2d7919a2 2599 gup_put_folio(folio, refs, flags);
f30c59e9
AK
2600 return 0;
2601 }
2602
a43e9820 2603 *nr += refs;
2d7919a2 2604 folio_set_referenced(folio);
f30c59e9
AK
2605 return 1;
2606}
2607
d3f7b1bb 2608static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
b798bec4 2609 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2610{
2611 unsigned long next;
2612 pmd_t *pmdp;
2613
d3f7b1bb 2614 pmdp = pmd_offset_lockless(pudp, pud, addr);
2667f50e 2615 do {
38c5ce93 2616 pmd_t pmd = READ_ONCE(*pmdp);
2667f50e
SC
2617
2618 next = pmd_addr_end(addr, end);
84c3fc4e 2619 if (!pmd_present(pmd))
2667f50e
SC
2620 return 0;
2621
414fd080
YZ
2622 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2623 pmd_devmap(pmd))) {
2667f50e
SC
2624 /*
2625 * NUMA hinting faults need to be handled in the GUP
2626 * slowpath for accounting purposes and so that they
2627 * can be serialised against THP migration.
2628 */
8a0516ed 2629 if (pmd_protnone(pmd))
2667f50e
SC
2630 return 0;
2631
b798bec4 2632 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2667f50e
SC
2633 pages, nr))
2634 return 0;
2635
f30c59e9
AK
2636 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2637 /*
 2638 * architectures have different formats for the hugetlbfs
 2639 * pmd and the THP pmd
2640 */
2641 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
b798bec4 2642 PMD_SHIFT, next, flags, pages, nr))
f30c59e9 2643 return 0;
b798bec4 2644 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2923117b 2645 return 0;
2667f50e
SC
2646 } while (pmdp++, addr = next, addr != end);
2647
2648 return 1;
2649}
2650
d3f7b1bb 2651static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
b798bec4 2652 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2653{
2654 unsigned long next;
2655 pud_t *pudp;
2656
d3f7b1bb 2657 pudp = pud_offset_lockless(p4dp, p4d, addr);
2667f50e 2658 do {
e37c6982 2659 pud_t pud = READ_ONCE(*pudp);
2667f50e
SC
2660
2661 next = pud_addr_end(addr, end);
15494520 2662 if (unlikely(!pud_present(pud)))
2667f50e 2663 return 0;
f30c59e9 2664 if (unlikely(pud_huge(pud))) {
b798bec4 2665 if (!gup_huge_pud(pud, pudp, addr, next, flags,
f30c59e9
AK
2666 pages, nr))
2667 return 0;
2668 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2669 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
b798bec4 2670 PUD_SHIFT, next, flags, pages, nr))
2667f50e 2671 return 0;
d3f7b1bb 2672 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2667f50e
SC
2673 return 0;
2674 } while (pudp++, addr = next, addr != end);
2675
2676 return 1;
2677}
2678
d3f7b1bb 2679static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
b798bec4 2680 unsigned int flags, struct page **pages, int *nr)
c2febafc
KS
2681{
2682 unsigned long next;
2683 p4d_t *p4dp;
2684
d3f7b1bb 2685 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
c2febafc
KS
2686 do {
2687 p4d_t p4d = READ_ONCE(*p4dp);
2688
2689 next = p4d_addr_end(addr, end);
2690 if (p4d_none(p4d))
2691 return 0;
2692 BUILD_BUG_ON(p4d_huge(p4d));
2693 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2694 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
b798bec4 2695 P4D_SHIFT, next, flags, pages, nr))
c2febafc 2696 return 0;
d3f7b1bb 2697 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
c2febafc
KS
2698 return 0;
2699 } while (p4dp++, addr = next, addr != end);
2700
2701 return 1;
2702}
2703
5b65c467 2704static void gup_pgd_range(unsigned long addr, unsigned long end,
b798bec4 2705 unsigned int flags, struct page **pages, int *nr)
5b65c467
KS
2706{
2707 unsigned long next;
2708 pgd_t *pgdp;
2709
2710 pgdp = pgd_offset(current->mm, addr);
2711 do {
2712 pgd_t pgd = READ_ONCE(*pgdp);
2713
2714 next = pgd_addr_end(addr, end);
2715 if (pgd_none(pgd))
2716 return;
2717 if (unlikely(pgd_huge(pgd))) {
b798bec4 2718 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
5b65c467
KS
2719 pages, nr))
2720 return;
2721 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2722 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
b798bec4 2723 PGDIR_SHIFT, next, flags, pages, nr))
5b65c467 2724 return;
d3f7b1bb 2725 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
5b65c467
KS
2726 return;
2727 } while (pgdp++, addr = next, addr != end);
2728}
050a9adc
CH
2729#else
2730static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2731 unsigned int flags, struct page **pages, int *nr)
2732{
2733}
2734#endif /* CONFIG_HAVE_FAST_GUP */
5b65c467
KS
2735
2736#ifndef gup_fast_permitted
2737/*
dadbb612 2738 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
5b65c467
KS
2739 * we need to fall back to the slow version:
2740 */
26f4c328 2741static bool gup_fast_permitted(unsigned long start, unsigned long end)
5b65c467 2742{
26f4c328 2743 return true;
5b65c467
KS
2744}
2745#endif
2746
7af75561
IW
2747static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2748 unsigned int gup_flags, struct page **pages)
2749{
2750 int ret;
2751
2752 /*
2753 * FIXME: FOLL_LONGTERM does not work with
2754 * get_user_pages_unlocked() (see comments in that function)
2755 */
2756 if (gup_flags & FOLL_LONGTERM) {
d8ed45c5 2757 mmap_read_lock(current->mm);
64019a2e 2758 ret = __gup_longterm_locked(current->mm,
7af75561
IW
2759 start, nr_pages,
2760 pages, NULL, gup_flags);
d8ed45c5 2761 mmap_read_unlock(current->mm);
7af75561
IW
2762 } else {
2763 ret = get_user_pages_unlocked(start, nr_pages,
2764 pages, gup_flags);
2765 }
2766
2767 return ret;
2768}
2769
c28b1fc7
JG
2770static unsigned long lockless_pages_from_mm(unsigned long start,
2771 unsigned long end,
2772 unsigned int gup_flags,
2773 struct page **pages)
2774{
2775 unsigned long flags;
2776 int nr_pinned = 0;
57efa1fe 2777 unsigned seq;
c28b1fc7
JG
2778
2779 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2780 !gup_fast_permitted(start, end))
2781 return 0;
2782
57efa1fe
JG
2783 if (gup_flags & FOLL_PIN) {
2784 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2785 if (seq & 1)
2786 return 0;
2787 }
2788
c28b1fc7
JG
2789 /*
2790 * Disable interrupts. The nested form is used, in order to allow full,
2791 * general purpose use of this routine.
2792 *
2793 * With interrupts disabled, we block page table pages from being freed
2794 * from under us. See struct mmu_table_batch comments in
2795 * include/asm-generic/tlb.h for more details.
2796 *
2797 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2798 * that come from THPs splitting.
2799 */
2800 local_irq_save(flags);
2801 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2802 local_irq_restore(flags);
57efa1fe
JG
2803
2804 /*
2805 * When pinning pages for DMA there could be a concurrent write protect
2806 * from fork() via copy_page_range(), in this case always fail fast GUP.
2807 */
2808 if (gup_flags & FOLL_PIN) {
2809 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
b6a2619c 2810 unpin_user_pages_lockless(pages, nr_pinned);
57efa1fe 2811 return 0;
b6a2619c
DH
2812 } else {
2813 sanity_check_pinned_pages(pages, nr_pinned);
57efa1fe
JG
2814 }
2815 }
c28b1fc7
JG
2816 return nr_pinned;
2817}
2818
2819static int internal_get_user_pages_fast(unsigned long start,
2820 unsigned long nr_pages,
eddb1c22
JH
2821 unsigned int gup_flags,
2822 struct page **pages)
2667f50e 2823{
c28b1fc7
JG
2824 unsigned long len, end;
2825 unsigned long nr_pinned;
2826 int ret;
2667f50e 2827
f4000fdf 2828 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
376a34ef 2829 FOLL_FORCE | FOLL_PIN | FOLL_GET |
55b8fe70 2830 FOLL_FAST_ONLY | FOLL_NOFAULT)))
817be129
CH
2831 return -EINVAL;
2832
a458b76a
AA
2833 if (gup_flags & FOLL_PIN)
2834 mm_set_has_pinned_flag(&current->mm->flags);
008cfe44 2835
f81cd178 2836 if (!(gup_flags & FOLL_FAST_ONLY))
da1c55f1 2837 might_lock_read(&current->mm->mmap_lock);
f81cd178 2838
f455c854 2839 start = untagged_addr(start) & PAGE_MASK;
c28b1fc7
JG
2840 len = nr_pages << PAGE_SHIFT;
2841 if (check_add_overflow(start, len, &end))
c61611f7 2842 return 0;
96d4f267 2843 if (unlikely(!access_ok((void __user *)start, len)))
c61611f7 2844 return -EFAULT;
73e10a61 2845
c28b1fc7
JG
2846 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2847 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2848 return nr_pinned;
2667f50e 2849
c28b1fc7
JG
2850 /* Slow path: try to get the remaining pages with get_user_pages */
2851 start += nr_pinned << PAGE_SHIFT;
2852 pages += nr_pinned;
2853 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2854 pages);
2855 if (ret < 0) {
2856 /*
2857 * The caller has to unpin the pages we already pinned so
2858 * returning -errno is not an option
2859 */
2860 if (nr_pinned)
2861 return nr_pinned;
2862 return ret;
2667f50e 2863 }
c28b1fc7 2864 return ret + nr_pinned;
2667f50e 2865}
c28b1fc7 2866
dadbb612
SJ
2867/**
2868 * get_user_pages_fast_only() - pin user pages in memory
2869 * @start: starting user address
2870 * @nr_pages: number of pages from start to pin
2871 * @gup_flags: flags modifying pin behaviour
2872 * @pages: array that receives pointers to the pages pinned.
2873 * Should be at least nr_pages long.
2874 *
9e1f0580
JH
2875 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2876 * the regular GUP.
2877 * Note a difference with get_user_pages_fast: this always returns the
2878 * number of pages pinned, 0 if no pages were pinned.
2879 *
2880 * If the architecture does not support this function, simply return with no
2881 * pages pinned.
2882 *
2883 * Careful, careful! COW breaking can go either way, so a non-write
2884 * access can get ambiguous page results. If you call this function without
2885 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2886 */
dadbb612
SJ
2887int get_user_pages_fast_only(unsigned long start, int nr_pages,
2888 unsigned int gup_flags, struct page **pages)
9e1f0580 2889{
376a34ef 2890 int nr_pinned;
9e1f0580
JH
2891 /*
2892 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2893 * because gup fast is always a "pin with a +1 page refcount" request.
376a34ef
JH
2894 *
2895 * FOLL_FAST_ONLY is required in order to match the API description of
2896 * this routine: no fall back to regular ("slow") GUP.
9e1f0580 2897 */
dadbb612 2898 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
9e1f0580 2899
376a34ef
JH
2900 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2901 pages);
9e1f0580
JH
2902
2903 /*
376a34ef
JH
2904 * As specified in the API description above, this routine is not
2905 * allowed to return negative values. However, the common core
2906 * routine internal_get_user_pages_fast() *can* return -errno.
2907 * Therefore, correct for that here:
9e1f0580 2908 */
376a34ef
JH
2909 if (nr_pinned < 0)
2910 nr_pinned = 0;
9e1f0580
JH
2911
2912 return nr_pinned;
2913}
dadbb612 2914EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
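/*
 * Illustrative sketch, not part of gup.c: an opportunistic grab from a
 * context that must not sleep or take mmap_lock (the futex key code uses
 * this pattern). On failure the caller is expected to fall back to a
 * sleeping slow path. The helper name is hypothetical.
 */
static int grab_page_atomic_sketch(unsigned long uaddr, struct page **page)
{
	/* Returns 0 or 1, never sleeps, and never falls back to slow GUP. */
	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, page) != 1)
		return -EAGAIN;		/* retry via the slow path */
	return 0;
}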
9e1f0580 2915
eddb1c22
JH
2916/**
2917 * get_user_pages_fast() - pin user pages in memory
3faa52c0
JH
2918 * @start: starting user address
2919 * @nr_pages: number of pages from start to pin
2920 * @gup_flags: flags modifying pin behaviour
2921 * @pages: array that receives pointers to the pages pinned.
2922 * Should be at least nr_pages long.
eddb1c22 2923 *
c1e8d7c6 2924 * Attempt to pin user pages in memory without taking mm->mmap_lock.
eddb1c22
JH
2925 * If not successful, it will fall back to taking the lock and
2926 * calling get_user_pages().
2927 *
2928 * Returns number of pages pinned. This may be fewer than the number requested.
2929 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2930 * -errno.
2931 */
2932int get_user_pages_fast(unsigned long start, int nr_pages,
2933 unsigned int gup_flags, struct page **pages)
2934{
447f3e45 2935 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2936 return -EINVAL;
2937
94202f12
JH
2938 /*
2939 * The caller may or may not have explicitly set FOLL_GET; either way is
2940 * OK. However, internally (within mm/gup.c), gup fast variants must set
2941 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2942 * request.
2943 */
2944 gup_flags |= FOLL_GET;
eddb1c22
JH
2945 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2946}
050a9adc 2947EXPORT_SYMBOL_GPL(get_user_pages_fast);
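/*
 * Illustrative sketch, not part of gup.c: a direct-IO style caller taking
 * short-lived references with get_user_pages_fast() and dropping them with
 * put_page(). Names are hypothetical; real block/net code usually goes
 * through iov_iter helpers instead of calling this directly.
 */
static int fast_gup_roundtrip_sketch(unsigned long uaddr, int nr,
				     struct page **pages)
{
	int got, i;

	got = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
	if (got < 0)
		return got;

	/* ... hand the pages to the device / build a bio here ... */

	for (i = 0; i < got; i++)
		put_page(pages[i]);
	return got;
}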
eddb1c22
JH
2948
2949/**
2950 * pin_user_pages_fast() - pin user pages in memory without taking locks
2951 *
3faa52c0
JH
2952 * @start: starting user address
2953 * @nr_pages: number of pages from start to pin
2954 * @gup_flags: flags modifying pin behaviour
2955 * @pages: array that receives pointers to the pages pinned.
2956 * Should be at least nr_pages long.
2957 *
2958 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2959 * get_user_pages_fast() for documentation on the function arguments, because
2960 * the arguments here are identical.
2961 *
2962 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2963 * see Documentation/core-api/pin_user_pages.rst for further details.
eddb1c22
JH
2964 */
2965int pin_user_pages_fast(unsigned long start, int nr_pages,
2966 unsigned int gup_flags, struct page **pages)
2967{
3faa52c0
JH
2968 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2969 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2970 return -EINVAL;
2971
2972 gup_flags |= FOLL_PIN;
2973 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
eddb1c22
JH
2974}
2975EXPORT_SYMBOL_GPL(pin_user_pages_fast);
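/*
 * Illustrative sketch, not part of gup.c: the FOLL_PIN lifecycle. Pages
 * obtained from pin_user_pages_fast() must be released with the
 * unpin_user_page*() family, never with put_page(). Names are
 * hypothetical.
 */
static int pin_for_dma_sketch(unsigned long uaddr, int nr,
			      struct page **pages)
{
	int pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

	if (pinned <= 0)
		return pinned;

	/* ... map for DMA, let the device write, wait for completion ... */

	/* Mark the pages dirty (the device wrote them) and drop the pins. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}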
2976
104acc32 2977/*
dadbb612
SJ
2978 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2979 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
104acc32
JH
2980 *
2981 * The API rules are the same, too: no negative values may be returned.
2982 */
2983int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2984 unsigned int gup_flags, struct page **pages)
2985{
2986 int nr_pinned;
2987
2988 /*
2989 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2990 * rules require returning 0, rather than -errno:
2991 */
2992 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2993 return 0;
2994 /*
2995 * FOLL_FAST_ONLY is required in order to match the API description of
2996 * this routine: no fall back to regular ("slow") GUP.
2997 */
2998 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2999 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3000 pages);
3001 /*
3002 * This routine is not allowed to return negative values. However,
3003 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3004 * correct for that here:
3005 */
3006 if (nr_pinned < 0)
3007 nr_pinned = 0;
3008
3009 return nr_pinned;
3010}
3011EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
3012
eddb1c22 3013/**
64019a2e 3014 * pin_user_pages_remote() - pin pages of a remote process
eddb1c22 3015 *
3faa52c0
JH
3016 * @mm: mm_struct of target mm
3017 * @start: starting user address
3018 * @nr_pages: number of pages from start to pin
3019 * @gup_flags: flags modifying lookup behaviour
3020 * @pages: array that receives pointers to the pages pinned.
3021 * Should be at least nr_pages long. Or NULL, if caller
3022 * only intends to ensure the pages are faulted in.
3023 * @vmas: array of pointers to vmas corresponding to each page.
3024 * Or NULL if the caller does not require them.
3025 * @locked: pointer to lock flag indicating whether lock is held and
3026 * subsequently whether VM_FAULT_RETRY functionality can be
3027 * utilised. Lock must initially be held.
3028 *
3029 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3030 * get_user_pages_remote() for documentation on the function arguments, because
3031 * the arguments here are identical.
3032 *
3033 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3034 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22 3035 */
64019a2e 3036long pin_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
3037 unsigned long start, unsigned long nr_pages,
3038 unsigned int gup_flags, struct page **pages,
3039 struct vm_area_struct **vmas, int *locked)
3040{
3faa52c0
JH
3041 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3042 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3043 return -EINVAL;
3044
3045 gup_flags |= FOLL_PIN;
64019a2e 3046 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3faa52c0 3047 pages, vmas, locked);
eddb1c22
JH
3048}
3049EXPORT_SYMBOL(pin_user_pages_remote);
3050
3051/**
3052 * pin_user_pages() - pin user pages in memory for use by other devices
3053 *
3faa52c0
JH
3054 * @start: starting user address
3055 * @nr_pages: number of pages from start to pin
3056 * @gup_flags: flags modifying lookup behaviour
3057 * @pages: array that receives pointers to the pages pinned.
3058 * Should be at least nr_pages long. Or NULL, if caller
3059 * only intends to ensure the pages are faulted in.
3060 * @vmas: array of pointers to vmas corresponding to each page.
3061 * Or NULL if the caller does not require them.
3062 *
3063 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3064 * FOLL_PIN is set.
3065 *
3066 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3067 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22
JH
3068 */
3069long pin_user_pages(unsigned long start, unsigned long nr_pages,
3070 unsigned int gup_flags, struct page **pages,
3071 struct vm_area_struct **vmas)
3072{
3faa52c0
JH
3073 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3074 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3075 return -EINVAL;
3076
3077 gup_flags |= FOLL_PIN;
64019a2e 3078 return __gup_longterm_locked(current->mm, start, nr_pages,
3faa52c0 3079 pages, vmas, gup_flags);
eddb1c22
JH
3080}
3081EXPORT_SYMBOL(pin_user_pages);
91429023
JH
3082
3083/*
3084 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3085 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3086 * FOLL_PIN and rejects FOLL_GET.
3087 */
3088long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3089 struct page **pages, unsigned int gup_flags)
3090{
3091 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3092 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3093 return -EINVAL;
3094
3095 gup_flags |= FOLL_PIN;
3096 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3097}
3098EXPORT_SYMBOL(pin_user_pages_unlocked);