mm: generalize the pgmap based page_free infrastructure
[linux-block.git] / mm / gup.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
        struct dev_pagemap *pgmap;
        unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
        VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
        VM_BUG_ON_PAGE(page != compound_head(page), page);

        atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
        VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
        VM_BUG_ON_PAGE(page != compound_head(page), page);

        atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
        if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
                return;
#endif

        /*
         * Calling put_page() for each ref is unnecessarily slow. Only the last
         * ref needs a put_page().
         */
        if (refs > 1)
                page_ref_sub(page, refs - 1);
        put_page(page);
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
        struct page *head = compound_head(page);

        if (WARN_ON_ONCE(page_ref_count(head) < 0))
                return NULL;
        if (unlikely(!page_cache_add_speculative(head, refs)))
                return NULL;

        /*
         * At this point we have a stable reference to the head page; but it
         * could be that between the compound_head() lookup and the refcount
         * increment, the compound page was split, in which case we'd end up
         * holding a reference on a page that has nothing to do with the page
         * we were given anymore.
         * So now that the head page is stable, recheck that the pages still
         * belong together.
         */
        if (unlikely(compound_head(page) != head)) {
                put_page_refs(head, refs);
                return NULL;
        }

        return head;
}

/**
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * Even though the name includes "compound_head", this function is still
 * appropriate for callers that have a non-compound @page to get.
 *
 * @page: pointer to page to be grabbed
 * @refs: the value to (effectively) add to the page's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount".
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 * FOLL_GET: page's refcount will be incremented by @refs.
 *
 * FOLL_PIN on compound pages that are > two pages long: page's refcount will
 * be incremented by @refs, and page[2].hpage_pinned_refcount will be
 * incremented by @refs (see hpage_pincount_add()).
 *
 * FOLL_PIN on normal pages, or compound pages that are two pages long:
 * page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
__maybe_unused struct page *try_grab_compound_head(struct page *page,
                                                   int refs, unsigned int flags)
{
        if (flags & FOLL_GET)
                return try_get_compound_head(page, refs);
        else if (flags & FOLL_PIN) {
                /*
                 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if the page
                 * is not in the right zone, so fail and let the caller fall
                 * back to the slow path.
                 */
                if (unlikely((flags & FOLL_LONGTERM) &&
                             !is_pinnable_page(page)))
                        return NULL;

                /*
                 * CAUTION: Don't use compound_head() on the page before this
                 * point, the result won't be stable.
                 */
                page = try_get_compound_head(page, refs);
                if (!page)
                        return NULL;

                /*
                 * When pinning a compound page of order > 1 (which is what
                 * hpage_pincount_available() checks for), use an exact count to
                 * track it, via hpage_pincount_add/_sub().
                 *
                 * However, be sure to *also* increment the normal page refcount
                 * field at least once, so that the page really is pinned.
                 * That's why the refcount from the earlier
                 * try_get_compound_head() is left intact.
                 */
                if (hpage_pincount_available(page))
                        hpage_pincount_add(page, refs);
                else
                        page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
                                    refs);

                return page;
        }

        WARN_ON_ONCE(1);
        return NULL;
}

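/*
 * Illustrative arithmetic for the cases documented above (a sketch, not
 * part of the original file; assumes GUP_PIN_COUNTING_BIAS == 1024 and
 * refs == 4):
 *
 *   FOLL_GET:                       page refcount += 4
 *   FOLL_PIN, base or order-1 page: page refcount += 4 * 1024
 *   FOLL_PIN, order > 1 compound:   head page refcount += 4, and
 *                                   page[2].hpage_pinned_refcount += 4
 *
 * This bias is why pinned pages must be released via unpin_user_page()
 * and friends, which reverse the same arithmetic, rather than put_page().
 */
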
static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
        if (flags & FOLL_PIN) {
                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
                                    refs);

                if (hpage_pincount_available(page))
                        hpage_pincount_sub(page, refs);
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }

        put_page_refs(page, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount".
 *
 * @page: pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases: please see the try_grab_compound_head() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
        WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));

        if (flags & FOLL_GET)
                return try_get_page(page);
        else if (flags & FOLL_PIN) {
                int refs = 1;

                page = compound_head(page);

                if (WARN_ON_ONCE(page_ref_count(page) <= 0))
                        return false;

                if (hpage_pincount_available(page))
                        hpage_pincount_add(page, 1);
                else
                        refs = GUP_PIN_COUNTING_BIAS;

                /*
                 * Similar to try_grab_compound_head(): even if using the
                 * hpage_pincount_add/_sub() routines, be sure to
                 * *also* increment the normal page refcount field at least
                 * once, so that the page really is pinned.
                 */
                page_ref_add(page, refs);

                mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
        }

        return true;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
        put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

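/*
 * Illustrative sketch (not from the original file): the canonical pairing
 * of pin_user_pages_fast() with unpin_user_page(). The helper name and
 * the single-page usage are hypothetical.
 */
static inline int example_pin_one_page(unsigned long uaddr)
{
        struct page *page;
        int ret;

        ret = pin_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... access the page, e.g. via kmap_local_page(), or start DMA ... */

        unpin_user_page(page);
        return 0;
}
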
static inline void compound_range_next(unsigned long i, unsigned long npages,
                                       struct page **list, struct page **head,
                                       unsigned int *ntails)
{
        struct page *next, *page;
        unsigned int nr = 1;

        if (i >= npages)
                return;

        next = *list + i;
        page = compound_head(next);
        if (PageCompound(page) && compound_order(page) >= 1)
                nr = min_t(unsigned int,
                           page + compound_nr(page) - next, npages - i);

        *head = page;
        *ntails = nr;
}

#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
        for (__i = 0, \
             compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
             __i < __npages; __i += __ntails, \
             compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))

static inline void compound_next(unsigned long i, unsigned long npages,
                                 struct page **list, struct page **head,
                                 unsigned int *ntails)
{
        struct page *page;
        unsigned int nr;

        if (i >= npages)
                return;

        page = compound_head(list[i]);
        for (nr = i + 1; nr < npages; nr++) {
                if (compound_head(list[nr]) != page)
                        break;
        }

        *head = page;
        *ntails = nr - i;
}

#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
        for (__i = 0, \
             compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
             __i < __npages; __i += __ntails, \
             compound_next(__i, __npages, __list, &(__head), &(__ntails)))

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages: array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        if (!make_dirty) {
                unpin_user_pages(pages, npages);
                return;
        }

        for_each_compound_head(index, pages, npages, head, ntails) {
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
                 * cases:
                 *
                 * 1) This code sees the page as already dirty, so it
                 * skips the call to set_page_dirty(). That could happen
                 * because clear_page_dirty_for_io() called
                 * page_mkclean(), followed by set_page_dirty().
                 * However, now the page is going to get written back,
                 * which meets the original intention of setting it
                 * dirty, so all is well: clear_page_dirty_for_io() goes
                 * on to call TestClearPageDirty(), and write the page
                 * back.
                 *
                 * 2) This code sees the page as clean, so it calls
                 * set_page_dirty(). The page stays dirty, despite being
                 * written back, so it gets written back again in the
                 * next writeback cycle. This is harmless.
                 */
                if (!PageDirty(head))
                        set_page_dirty_lock(head);
                put_compound_head(head, ntails, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

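/*
 * Illustrative sketch (not from the original file): a typical completion
 * path for a device that has finished writing into pinned pages. The
 * helper name is hypothetical; @pages/@npages are assumed to come from a
 * prior pin_user_pages*() call.
 */
static inline void example_dma_read_complete(struct page **pages,
                                             unsigned long npages)
{
        /* The device wrote into the pages, so dirty them while unpinning: */
        unpin_user_pages_dirty_lock(pages, npages, true);
}
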
/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page: the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
                                      bool make_dirty)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        for_each_compound_range(index, &page, npages, head, ntails) {
                if (make_dirty && !PageDirty(head))
                        set_page_dirty_lock(head);
                put_compound_head(head, ntails, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages: array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
        unsigned long index;
        struct page *head;
        unsigned int ntails;

        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages (by
         * leaving them pinned), but probably not. More likely, gup/pup returned
         * a hard -ERRNO error to the caller, who erroneously passed it here.
         */
        if (WARN_ON(IS_ERR_VALUE(npages)))
                return;

        for_each_compound_head(index, pages, npages, head, ntails)
                put_compound_head(head, ntails, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_pages);

/*
 * Set the MMF_HAS_PINNED flag if it is not set yet; once set, it stays for the
 * mm's lifetime. Avoid setting the bit unless necessary, or it might cause
 * write cache bouncing on large SMP machines for concurrent pinned gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
        if (!test_bit(MMF_HAS_PINNED, mm_flags))
                set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
                unsigned int flags)
{
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate unnecessary pages or
         * page tables. Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
        if ((flags & FOLL_DUMP) &&
                        (vma_is_anonymous(vma) || !vma->vm_ops->fault))
                return ERR_PTR(-EFAULT);
        return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, unsigned int flags)
{
        /* No page to get reference */
        if (flags & FOLL_GET)
                return -EFAULT;

        if (flags & FOLL_TOUCH) {
                pte_t entry = *pte;

                if (flags & FOLL_WRITE)
                        entry = pte_mkdirty(entry);
                entry = pte_mkyoung(entry);

                if (!pte_same(*pte, entry)) {
                        set_pte_at(vma->vm_mm, address, pte, entry);
                        update_mmu_cache(vma, address, pte);
                }
        }

        /* Proper page table entry exists, but no corresponding struct page */
        return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
        return pte_write(pte) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags,
                struct dev_pagemap **pgmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int ret;

        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);
retry:
        if (unlikely(pmd_bad(*pmd)))
                return no_page_table(vma, flags);

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!pte_present(pte)) {
                swp_entry_t entry;
                /*
                 * KSM's break_ksm() relies upon recognizing a ksm page
                 * even while it is being migrated, so for that case we
                 * need migration_entry_wait().
                 */
                if (likely(!(flags & FOLL_MIGRATION)))
                        goto no_page;
                if (pte_none(pte))
                        goto no_page;
                entry = pte_to_swp_entry(pte);
                if (!is_migration_entry(entry))
                        goto no_page;
                pte_unmap_unlock(ptep, ptl);
                migration_entry_wait(mm, pmd, address);
                goto retry;
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
                pte_unmap_unlock(ptep, ptl);
                return NULL;
        }

        page = vm_normal_page(vma, address, pte);
        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
                 * case since they are only valid while holding the pgmap
                 * reference.
                 */
                *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
                if (*pgmap)
                        page = pte_page(pte);
                else
                        goto no_page;
        } else if (unlikely(!page)) {
                if (flags & FOLL_DUMP) {
                        /* Avoid special (like zero) pages in core dumps */
                        page = ERR_PTR(-EFAULT);
                        goto out;
                }

                if (is_zero_pfn(pte_pfn(pte))) {
                        page = pte_page(pte);
                } else {
                        ret = follow_pfn_pte(vma, address, ptep, flags);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }

        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        if (unlikely(!try_grab_page(page, flags))) {
                page = ERR_PTR(-ENOMEM);
                goto out;
        }
        /*
         * We need to make the page accessible if and only if we are going
         * to access its content (the FOLL_PIN case). Please see
         * Documentation/core-api/pin_user_pages.rst for details.
         */
        if (flags & FOLL_PIN) {
                ret = arch_make_page_accessible(page);
                if (ret) {
                        unpin_user_page(page);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
                /*
                 * pte_mkyoung() would be more correct here, but atomic care
                 * is needed to avoid losing the dirty bit: it is easier to use
                 * mark_page_accessed().
                 */
                mark_page_accessed(page);
        }
out:
        pte_unmap_unlock(ptep, ptl);
        return page;
no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return NULL;
        return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                                    unsigned long address, pud_t *pudp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pmd_t *pmd, pmdval;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pmd = pmd_offset(pudp, address);
        /*
         * The READ_ONCE() will stabilize the pmdval in a register or
         * on the stack so that it will stop changing under the code.
         */
        pmdval = READ_ONCE(*pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
        if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pmd(mm, address, pmd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pmd_val(pmdval)), flags,
                                      PMD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
retry:
        if (!pmd_present(pmdval)) {
                /*
                 * Should never reach here, if thp migration is not supported;
                 * Otherwise, it must be a thp migration entry.
                 */
                VM_BUG_ON(!thp_migration_supported() ||
                          !is_pmd_migration_entry(pmdval));

                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);

                pmd_migration_entry_wait(mm, pmd);
                pmdval = READ_ONCE(*pmd);
                /*
                 * MADV_DONTNEED may convert the pmd to null because
                 * mmap_lock is held in read mode
                 */
                if (pmd_none(pmdval))
                        return no_page_table(vma, flags);
                goto retry;
        }
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

        if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
                return no_page_table(vma, flags);

retry_locked:
        ptl = pmd_lock(mm, pmd);
        if (unlikely(pmd_none(*pmd))) {
                spin_unlock(ptl);
                return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
                pmd_migration_entry_wait(mm, pmd);
                goto retry_locked;
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        if (flags & FOLL_SPLIT_PMD) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
                        spin_unlock(ptl);
                        ret = 0;
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmd, address);
                        ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
                }

                return ret ? ERR_PTR(ret) :
                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
        spin_unlock(ptl);
        ctx->page_mask = HPAGE_PMD_NR - 1;
        return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
                                    unsigned long address, p4d_t *p4dp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pud_t *pud;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pud(mm, address, pud, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pud_val(*pud)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pud_val(*pud)), flags,
                                      PUD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);

        return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                    unsigned long address, pgd_t *pgdp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        p4d_t *p4d;
        struct page *page;

        p4d = p4d_offset(pgdp, address);
        if (p4d_none(*p4d))
                return no_page_table(vma, flags);
        BUILD_BUG_ON(p4d_huge(*p4d));
        if (unlikely(p4d_bad(*p4d)))
                return no_page_table(vma, flags);

        if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(p4d_val(*p4d)), flags,
                                      P4D_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int flags,
                              struct follow_page_context *ctx)
{
        pgd_t *pgd;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        ctx->page_mask = 0;

        /* make this handle hugepd */
        page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
        if (!IS_ERR(page)) {
                WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
                return page;
        }

        pgd = pgd_offset(mm, address);

        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);

        if (pgd_huge(*pgd)) {
                page = follow_huge_pgd(mm, address, pgd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pgd_val(*pgd)), flags,
                                      PGDIR_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }

        return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags)
{
        struct follow_page_context ctx = { NULL };
        struct page *page;

        if (vma_is_secretmem(vma))
                return NULL;

        page = follow_page_mask(vma, address, foll_flags, &ctx);
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return page;
}

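/*
 * Illustrative sketch (not from the original file, helper name
 * hypothetical): taking a reference on whatever page is currently mapped
 * at @addr, in the style of in-kernel follow_page() users. The caller
 * must hold the mmap_lock and later drop the reference with put_page().
 */
static inline struct page *example_get_mapped_page(struct vm_area_struct *vma,
                                                   unsigned long addr)
{
        struct page *page;

        mmap_assert_locked(vma->vm_mm);
        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                return NULL;
        return page;
}
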
static int get_gate_page(struct mm_struct *mm, unsigned long address,
                unsigned int gup_flags, struct vm_area_struct **vma,
                struct page **page)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret = -EFAULT;

        /* user gate pages are read-only */
        if (gup_flags & FOLL_WRITE)
                return -EFAULT;
        if (address > TASK_SIZE)
                pgd = pgd_offset_k(address);
        else
                pgd = pgd_offset_gate(mm, address);
        if (pgd_none(*pgd))
                return -EFAULT;
        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d))
                return -EFAULT;
        pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return -EFAULT;
        VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map(pmd, address);
        if (pte_none(*pte))
                goto unmap;
        *vma = get_gate_vma(mm);
        if (!page)
                goto out;
        *page = vm_normal_page(*vma, address, *pte);
        if (!*page) {
                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
                        goto unmap;
                *page = pte_page(*pte);
        }
        if (unlikely(!try_grab_page(*page, gup_flags))) {
                ret = -ENOMEM;
                goto unmap;
        }
out:
        ret = 0;
unmap:
        pte_unmap(pte);
        return ret;
}

/*
 * mmap_lock must be held on entry. If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, int *locked)
{
        unsigned int fault_flags = 0;
        vm_fault_t ret;

        if (*flags & FOLL_NOFAULT)
                return -EFAULT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_REMOTE)
                fault_flags |= FAULT_FLAG_REMOTE;
        if (locked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
        if (*flags & FOLL_TRIED) {
                /*
                 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
                 * can co-exist
                 */
                fault_flags |= FAULT_FLAG_TRIED;
        }

        ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, *flags);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
                return -EBUSY;
        }

        /*
         * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
         * necessary, even if maybe_mkwrite decided not to set pte_write. We
         * can thus safely do subsequent page lookups as if they were reads.
         * But only do so when looping for pte_write is futile: in some cases
         * userspace may also be wanting to write to the gotten user page,
         * which a read fault here might prevent (a readonly page might get
         * reCOWed by userspace write).
         */
        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
                *flags |= FOLL_COW;
        return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
        vm_flags_t vm_flags = vma->vm_flags;
        int write = (gup_flags & FOLL_WRITE);
        int foreign = (gup_flags & FOLL_REMOTE);

        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;

        if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
                return -EFAULT;

        if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
                return -EOPNOTSUPP;

        if (vma_is_secretmem(vma))
                return -EFAULT;

        if (write) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
                                return -EFAULT;
                        /*
                         * We used to let the write,force case do COW in a
                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
                         * set a breakpoint in a read-only mapping of an
                         * executable, without corrupting the file (yet only
                         * when that file had been opened for writing!).
                         * Anon pages in shared mappings are surprising: now
                         * just reject it.
                         */
                        if (!is_cow_mapping(vm_flags))
                                return -EFAULT;
                }
        } else if (!(vm_flags & VM_READ)) {
                if (!(gup_flags & FOLL_FORCE))
                        return -EFAULT;
                /*
                 * Is there actually any vma we can reach here which does not
                 * have VM_MAYREAD set?
                 */
                if (!(vm_flags & VM_MAYREAD))
                        return -EFAULT;
        }
        /*
         * gups are always data accesses, not instruction
         * fetches, so execute=false here
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return -EFAULT;
        return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:         mm_struct of target mm
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @gup_flags:  flags modifying pin behaviour
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long. Or NULL, if caller
 *              only intends to ensure the pages are faulted in.
 * @vmas:       array of pointers to vmas corresponding to each page.
 *              Or NULL if the caller does not require them.
 * @locked:     whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read(). That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released. Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
{
        long ret = 0, i = 0;
        struct vm_area_struct *vma = NULL;
        struct follow_page_context ctx = { NULL };

        if (!nr_pages)
                return 0;

        start = untagged_addr(start);

        VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

        /*
         * If FOLL_FORCE is set then do not force a full fault as the hinting
         * fault information is unrelated to the reference behaviour of a task
         * using the address space
         */
        if (!(gup_flags & FOLL_FORCE))
                gup_flags |= FOLL_NUMA;

        do {
                struct page *page;
                unsigned int foll_flags = gup_flags;
                unsigned int page_increm;

                /* first iteration or cross vma bound */
                if (!vma || start >= vma->vm_end) {
                        vma = find_extend_vma(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                gup_flags, &vma,
                                                pages ? &pages[i] : NULL);
                                if (ret)
                                        goto out;
                                ctx.page_mask = 0;
                                goto next_page;
                        }

                        if (!vma) {
                                ret = -EFAULT;
                                goto out;
                        }
                        ret = check_vma_flags(vma, gup_flags);
                        if (ret)
                                goto out;

                        if (is_vm_hugetlb_page(vma)) {
                                i = follow_hugetlb_page(mm, vma, pages, vmas,
                                                &start, &nr_pages, i,
                                                gup_flags, locked);
                                if (locked && *locked == 0) {
                                        /*
                                         * We've got a VM_FAULT_RETRY
                                         * and we've lost mmap_lock.
                                         * We must stop here.
                                         */
                                        BUG_ON(gup_flags & FOLL_NOWAIT);
                                        goto out;
                                }
                                continue;
                        }
                }
retry:
                /*
                 * If we have a pending SIGKILL, don't keep faulting pages and
                 * potentially allocating memory.
                 */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }
                cond_resched();

                page = follow_page_mask(vma, start, foll_flags, &ctx);
                if (!page) {
                        ret = faultin_page(vma, start, &foll_flags, locked);
                        switch (ret) {
                        case 0:
                                goto retry;
                        case -EBUSY:
                                ret = 0;
                                fallthrough;
                        case -EFAULT:
                        case -ENOMEM:
                        case -EHWPOISON:
                                goto out;
                        }
                        BUG();
                } else if (PTR_ERR(page) == -EEXIST) {
                        /*
                         * Proper page table entry exists, but no corresponding
                         * struct page.
                         */
                        goto next_page;
                } else if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
                if (pages) {
                        pages[i] = page;
                        flush_anon_page(vma, page, start);
                        flush_dcache_page(page);
                        ctx.page_mask = 0;
                }
next_page:
                if (vmas) {
                        vmas[i] = vma;
                        ctx.page_mask = 0;
                }
                page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
                if (page_increm > nr_pages)
                        page_increm = nr_pages;
                i += page_increm;
                start += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
        } while (nr_pages);
out:
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return i ? i : ret;
}

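/*
 * Illustrative sketch (not from the original file, helper name
 * hypothetical): one way an all-or-nothing caller can handle the
 * "fewer pages than requested" return convention described above, using
 * the public pin_user_pages_fast() entry point.
 */
static inline int example_pin_exactly(unsigned long start, int nr_pages,
                                      struct page **pages)
{
        int got = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);

        if (got < 0)
                return got;
        if (got != nr_pages) {
                /* Partial success: undo the pins and report failure. */
                unpin_user_pages(pages, got);
                return -EFAULT;
        }
        return got;
}
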
static bool vma_permits_fault(struct vm_area_struct *vma,
                              unsigned int fault_flags)
{
        bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
        bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
        vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

        if (!(vm_flags & vma->vm_flags))
                return false;

        /*
         * The architecture might have a hardware protection
         * mechanism other than read/write that can deny access.
         *
         * gup always represents data access, not instruction
         * fetches, so execute=false here:
         */
        if (!arch_vma_access_permitted(vma, write, false, foreign))
                return false;

        return true;
}

/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:         mm_struct of target mm
 * @address:    user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:   did we unlock the mmap_lock while retrying, maybe NULL if caller
 *              does not allow retry. If NULL, the caller must guarantee
 *              that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
                     unsigned long address, unsigned int fault_flags,
                     bool *unlocked)
{
        struct vm_area_struct *vma;
        vm_fault_t ret;

        address = untagged_addr(address);

        if (unlocked)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
        vma = find_extend_vma(mm, address);
        if (!vma || address < vma->vm_start)
                return -EFAULT;

        if (!vma_permits_fault(vma, fault_flags))
                return -EFAULT;

        if ((fault_flags & FAULT_FLAG_KILLABLE) &&
            fatal_signal_pending(current))
                return -EINTR;

        ret = handle_mm_fault(vma, address, fault_flags, NULL);
        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, 0);

                if (err)
                        return err;
                BUG();
        }

        if (ret & VM_FAULT_RETRY) {
                mmap_read_lock(mm);
                *unlocked = true;
                fault_flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

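/*
 * Illustrative sketch (not from the original file, helper name
 * hypothetical): resolving a fault that was hit while accessing user
 * memory with page faults disabled, futex-style. On success the caller
 * retries its atomic access.
 */
static inline int example_fault_in_for_write(struct mm_struct *mm,
                                             unsigned long uaddr)
{
        bool unlocked = false;
        int ret;

        mmap_read_lock(mm);
        ret = fixup_user_fault(mm, uaddr, FAULT_FLAG_WRITE, &unlocked);
        mmap_read_unlock(mm);
        return ret;
}
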
/*
 * Please note that this function, unlike __get_user_pages(), will not
 * return 0 for nr_pages > 0 without FOLL_NOWAIT.
 */
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
                                                struct page **pages,
                                                struct vm_area_struct **vmas,
                                                int *locked,
                                                unsigned int flags)
{
        long ret, pages_done;
        bool lock_dropped;

        if (locked) {
                /* if VM_FAULT_RETRY can be returned, vmas become invalid */
                BUG_ON(vmas);
                /* check caller initialized locked */
                BUG_ON(*locked != 1);
        }

        if (flags & FOLL_PIN)
                mm_set_has_pinned_flag(&mm->flags);

        /*
         * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
         * is to set FOLL_GET if the caller wants pages[] filled in (but has
         * carelessly failed to specify FOLL_GET), so keep doing that, but only
         * for FOLL_GET, not for the newer FOLL_PIN.
         *
         * FOLL_PIN always expects pages to be non-null, but no need to assert
         * that here, as any failures will be obvious enough.
         */
        if (pages && !(flags & FOLL_PIN))
                flags |= FOLL_GET;

        pages_done = 0;
        lock_dropped = false;
        for (;;) {
                ret = __get_user_pages(mm, start, nr_pages, flags, pages,
                                       vmas, locked);
                if (!locked)
                        /* VM_FAULT_RETRY couldn't trigger, bypass */
                        return ret;

                /* VM_FAULT_RETRY cannot return errors */
                if (!*locked) {
                        BUG_ON(ret < 0);
                        BUG_ON(ret >= nr_pages);
                }

                if (ret > 0) {
                        nr_pages -= ret;
                        pages_done += ret;
                        if (!nr_pages)
                                break;
                }
                if (*locked) {
                        /*
                         * VM_FAULT_RETRY didn't trigger or it was a
                         * FOLL_NOWAIT.
                         */
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                /*
                 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
                 * For the prefault case (!pages) we only update counts.
                 */
                if (likely(pages))
                        pages += ret;
                start += ret << PAGE_SHIFT;
                lock_dropped = true;

retry:
                /*
                 * Repeat on the address that fired VM_FAULT_RETRY
                 * with both FAULT_FLAG_ALLOW_RETRY and
                 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
                 * by fatal signals, so we need to check it before we
                 * start trying again otherwise it can loop forever.
                 */

                if (fatal_signal_pending(current)) {
                        if (!pages_done)
                                pages_done = -EINTR;
                        break;
                }

                ret = mmap_read_lock_killable(mm);
                if (ret) {
                        BUG_ON(ret > 0);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }

                *locked = 1;
                ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
                                       pages, NULL, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
                        BUG_ON(ret != 0);
                        goto retry;
                }
                if (ret != 1) {
                        BUG_ON(ret > 1);
                        if (!pages_done)
                                pages_done = ret;
                        break;
                }
                nr_pages--;
                pages_done++;
                if (!nr_pages)
                        break;
                if (likely(pages))
                        pages++;
                start += PAGE_SIZE;
        }
        if (lock_dropped && *locked) {
                /*
                 * We must let the caller know we temporarily dropped the lock
                 * and so the critical section protected by it was lost.
                 */
                mmap_read_unlock(mm);
                *locked = 0;
        }
        return pages_done;
}

/**
 * populate_vma_page_range() - populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @locked: whether the mmap_lock is still held
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * Return either the number of pages pinned in the vma, or a negative error
 * code on error.
 *
 * vma->vm_mm->mmap_lock must be held.
 *
 * If @locked is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be
 * released. If it's released, *@locked will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end > vma->vm_end, vma);
        mmap_assert_locked(mm);

        /*
         * Rightly or wrongly, the VM_LOCKONFAULT case has never used
         * faultin_page() to break COW, so it has no work to do here.
         */
        if (vma->vm_flags & VM_LOCKONFAULT)
                return nr_pages;

        gup_flags = FOLL_TOUCH;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
         * and we would not want to dirty them for nothing.
         */
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;

        /*
         * We want mlock to succeed for regions that have any permissions
         * other than PROT_NONE.
         */
        if (vma_is_accessible(vma))
                gup_flags |= FOLL_FORCE;

        /*
         * We made sure addr is within a VMA, so the following will
         * not result in a stack expansion that recurses back here.
         */
        return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
}

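/*
 * Illustrative sketch (not from the original file, helper name
 * hypothetical): populating an entire VMA the way mlock()/__mm_populate()
 * do, with the mmap_lock already held and no interest in whether it was
 * temporarily dropped (@locked == NULL).
 */
static inline long example_populate_whole_vma(struct vm_area_struct *vma)
{
        return populate_vma_page_range(vma, vma->vm_start, vma->vm_end, NULL);
}
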
/*
 * faultin_vma_page_range() - populate (prefault) page tables inside the
 *                            given VMA range readable/writable
 *
 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
 *
 * @vma: target vma
 * @start: start address
 * @end: end address
 * @write: whether to prefault readable or writable
 * @locked: whether the mmap_lock is still held
 *
 * Returns either the number of processed pages in the vma, or a negative error
 * code on error (see __get_user_pages()).
 *
 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
 * covered by the VMA.
 *
 * If @locked is NULL, it may be held for read or write and will be unperturbed.
 *
 * If @locked is non-NULL, it must be held for read only and may be released. If
 * it's released, *@locked will be set to 0.
 */
long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, bool write, int *locked)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;

        VM_BUG_ON(!PAGE_ALIGNED(start));
        VM_BUG_ON(!PAGE_ALIGNED(end));
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end > vma->vm_end, vma);
        mmap_assert_locked(mm);

        /*
         * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
         *             the page dirty with FOLL_WRITE -- which doesn't make a
         *             difference with !FOLL_FORCE, because the page is writable
         *             in the page table.
         * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
         *                a poisoned page.
         * !FOLL_FORCE: Require proper access permissions.
         */
        gup_flags = FOLL_TOUCH | FOLL_HWPOISON;
        if (write)
                gup_flags |= FOLL_WRITE;

        /*
         * We want to report -EINVAL instead of -EFAULT for any permission
         * problems or incompatible mappings.
         */
        if (check_vma_flags(vma, gup_flags))
                return -EINVAL;

        return __get_user_pages(mm, start, nr_pages, gup_flags,
                                NULL, NULL, locked);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_lock must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
        struct mm_struct *mm = current->mm;
        unsigned long end, nstart, nend;
        struct vm_area_struct *vma = NULL;
        int locked = 0;
        long ret = 0;

        end = start + len;

        for (nstart = start; nstart < end; nstart = nend) {
                /*
                 * We want to fault in pages for [nstart; end) address range.
                 * Find first corresponding VMA.
                 */
                if (!locked) {
                        locked = 1;
                        mmap_read_lock(mm);
                        vma = find_vma(mm, nstart);
                } else if (nstart >= vma->vm_end)
                        vma = vma->vm_next;
                if (!vma || vma->vm_start >= end)
                        break;
                /*
                 * Set [nstart; nend) to intersection of desired address
                 * range with the first VMA. Also, skip undesirable VMA types.
                 */
                nend = min(end, vma->vm_end);
                if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                        continue;
                if (nstart < vma->vm_start)
                        nstart = vma->vm_start;
                /*
                 * Now fault in a range of pages. populate_vma_page_range()
                 * double checks the vma flags, so that it won't mlock pages
                 * if the vma was already munlocked.
                 */
                ret = populate_vma_page_range(vma, nstart, nend, &locked);
                if (ret < 0) {
                        if (ignore_errors) {
                                ret = 0;
                                continue;       /* continue at next VMA */
                        }
                        break;
                }
                nend = nstart + ret * PAGE_SIZE;
                ret = 0;
        }
        if (locked)
                mmap_read_unlock(mm);
        return ret;     /* 0 or negative error code */
}
#else /* CONFIG_MMU */
static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
                unsigned long nr_pages, struct page **pages,
                struct vm_area_struct **vmas, int *locked,
                unsigned int foll_flags)
{
        struct vm_area_struct *vma;
        unsigned long vm_flags;
        long i;

        /* calculate required read or write permissions.
         * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
        vm_flags  = (foll_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (foll_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
                if (!vma)
                        goto finish_or_fault;

                /* protect what we can, including chardevs */
                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        goto finish_or_fault;

                if (pages) {
                        pages[i] = virt_to_page(start);
                        if (pages[i])
                                get_page(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
                start = (start + PAGE_SIZE) & PAGE_MASK;
        }

        return i;

finish_or_fault:
        return i ? : -EFAULT;
}
#endif /* !CONFIG_MMU */

bb523b40
AG
1664/**
1665 * fault_in_writeable - fault in userspace address range for writing
1666 * @uaddr: start of address range
1667 * @size: size of address range
1668 *
1669 * Returns the number of bytes not faulted in (like copy_to_user() and
1670 * copy_from_user()).
1671 */
1672size_t fault_in_writeable(char __user *uaddr, size_t size)
1673{
1674 char __user *start = uaddr, *end;
1675
1676 if (unlikely(size == 0))
1677 return 0;
677b2a8c
CL
1678 if (!user_write_access_begin(uaddr, size))
1679 return size;
bb523b40 1680 if (!PAGE_ALIGNED(uaddr)) {
677b2a8c 1681 unsafe_put_user(0, uaddr, out);
bb523b40
AG
1682 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
1683 }
1684 end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
1685 if (unlikely(end < start))
1686 end = NULL;
1687 while (uaddr != end) {
677b2a8c 1688 unsafe_put_user(0, uaddr, out);
bb523b40
AG
1689 uaddr += PAGE_SIZE;
1690 }
1691
1692out:
677b2a8c 1693 user_write_access_end();
bb523b40
AG
1694 if (size > uaddr - start)
1695 return size - (uaddr - start);
1696 return 0;
1697}
1698EXPORT_SYMBOL(fault_in_writeable);
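/*
 * [Editor's sketch, not part of gup.c] The retry pattern that
 * fault_in_writeable() exists to support: copy to user space with page
 * faults disabled (e.g. while holding locks that must not be held across
 * a fault), then fault the destination in and retry on a short copy.
 * The example_ name is hypothetical; pagefault_disable()/pagefault_enable()
 * come from <linux/uaccess.h>.
 */
static ssize_t example_copy_out(char __user *buf, const void *src, size_t count)
{
	size_t left;

	for (;;) {
		pagefault_disable();
		left = copy_to_user(buf, src, count);
		pagefault_enable();
		if (left == 0)
			return count;
		/* Fault in the uncopied tail; give up if none of it can be. */
		if (fault_in_writeable(buf + (count - left), left) == left)
			return -EFAULT;
	}
}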
1699
cdd591fc
AG
1700/*
1701 * fault_in_safe_writeable - fault in an address range for writing
1702 * @uaddr: start of address range
1703 * @size: length of address range
1704 *
1705 * Faults in an address range using get_user_pages, i.e., without triggering
1706 * hardware page faults. This is primarily useful when we already know that
1707 * some or all of the pages in the address range aren't in memory.
1708 *
 1709 * Unlike fault_in_writeable(), this function is non-destructive.
1710 *
 1711 * Note that we don't pin or otherwise hold the pages that we fault in.
 1712 * There's no guarantee that they'll stay in memory for any length of
 1713 * time.
1714 *
1715 * Returns the number of bytes not faulted in, like copy_to_user() and
1716 * copy_from_user().
1717 */
1718size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
1719{
1720 unsigned long start = (unsigned long)untagged_addr(uaddr);
1721 unsigned long end, nstart, nend;
1722 struct mm_struct *mm = current->mm;
1723 struct vm_area_struct *vma = NULL;
1724 int locked = 0;
1725
1726 nstart = start & PAGE_MASK;
1727 end = PAGE_ALIGN(start + size);
1728 if (end < nstart)
1729 end = 0;
1730 for (; nstart != end; nstart = nend) {
1731 unsigned long nr_pages;
1732 long ret;
1733
1734 if (!locked) {
1735 locked = 1;
1736 mmap_read_lock(mm);
1737 vma = find_vma(mm, nstart);
1738 } else if (nstart >= vma->vm_end)
1739 vma = vma->vm_next;
1740 if (!vma || vma->vm_start >= end)
1741 break;
1742 nend = end ? min(end, vma->vm_end) : vma->vm_end;
1743 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1744 continue;
1745 if (nstart < vma->vm_start)
1746 nstart = vma->vm_start;
1747 nr_pages = (nend - nstart) / PAGE_SIZE;
1748 ret = __get_user_pages_locked(mm, nstart, nr_pages,
1749 NULL, NULL, &locked,
1750 FOLL_TOUCH | FOLL_WRITE);
1751 if (ret <= 0)
1752 break;
1753 nend = nstart + ret * PAGE_SIZE;
1754 }
1755 if (locked)
1756 mmap_read_unlock(mm);
1757 if (nstart == end)
1758 return 0;
1759 return size - min_t(size_t, nstart - start, size);
1760}
1761EXPORT_SYMBOL(fault_in_safe_writeable);
1762
bb523b40
AG
1763/**
1764 * fault_in_readable - fault in userspace address range for reading
1765 * @uaddr: start of user address range
1766 * @size: size of user address range
1767 *
1768 * Returns the number of bytes not faulted in (like copy_to_user() and
1769 * copy_from_user()).
1770 */
1771size_t fault_in_readable(const char __user *uaddr, size_t size)
1772{
1773 const char __user *start = uaddr, *end;
1774 volatile char c;
1775
1776 if (unlikely(size == 0))
1777 return 0;
677b2a8c
CL
1778 if (!user_read_access_begin(uaddr, size))
1779 return size;
bb523b40 1780 if (!PAGE_ALIGNED(uaddr)) {
677b2a8c 1781 unsafe_get_user(c, uaddr, out);
bb523b40
AG
1782 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
1783 }
1784 end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
1785 if (unlikely(end < start))
1786 end = NULL;
1787 while (uaddr != end) {
677b2a8c 1788 unsafe_get_user(c, uaddr, out);
bb523b40
AG
1789 uaddr += PAGE_SIZE;
1790 }
1791
1792out:
677b2a8c 1793 user_read_access_end();
bb523b40
AG
1794 (void)c;
1795 if (size > uaddr - start)
1796 return size - (uaddr - start);
1797 return 0;
1798}
1799EXPORT_SYMBOL(fault_in_readable);
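/*
 * [Editor's sketch, not part of gup.c] The read-side counterpart of the
 * pattern shown after fault_in_writeable(): a write(2)-style path copies
 * from user space with page faults disabled, and uses fault_in_readable()
 * to page the source in before retrying. Hypothetical example_ name.
 */
static ssize_t example_copy_in(void *dst, const char __user *buf, size_t count)
{
	size_t left;

	for (;;) {
		pagefault_disable();
		left = copy_from_user(dst, buf, count);
		pagefault_enable();
		if (left == 0)
			return count;
		if (fault_in_readable(buf + (count - left), left) == left)
			return -EFAULT;
	}
}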
1800
8f942eea
JH
1801/**
1802 * get_dump_page() - pin user page in memory while writing it to core dump
1803 * @addr: user address
1804 *
1805 * Returns struct page pointer of user page pinned for dump,
1806 * to be freed afterwards by put_page().
1807 *
1808 * Returns NULL on any kind of failure - a hole must then be inserted into
1809 * the corefile, to preserve alignment with its headers; and also returns
1810 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
f0953a1b 1811 * allowing a hole to be left in the corefile to save disk space.
8f942eea 1812 *
7f3bfab5 1813 * Called without mmap_lock (takes and releases the mmap_lock by itself).
8f942eea
JH
1814 */
1815#ifdef CONFIG_ELF_CORE
1816struct page *get_dump_page(unsigned long addr)
1817{
7f3bfab5 1818 struct mm_struct *mm = current->mm;
8f942eea 1819 struct page *page;
7f3bfab5
JH
1820 int locked = 1;
1821 int ret;
8f942eea 1822
7f3bfab5 1823 if (mmap_read_lock_killable(mm))
8f942eea 1824 return NULL;
7f3bfab5
JH
1825 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1826 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1827 if (locked)
1828 mmap_read_unlock(mm);
1829 return (ret == 1) ? page : NULL;
8f942eea
JH
1830}
1831#endif /* CONFIG_ELF_CORE */
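/*
 * [Editor's sketch, not part of gup.c] How a coredump writer might consume
 * get_dump_page(); compare dump_user_range() in fs/coredump.c. dump_emit()
 * and dump_skip() are the real helpers from <linux/coredump.h>; kmap() is
 * from <linux/highmem.h>. The function name is hypothetical.
 */
static int example_dump_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap(page);
			int stop = !dump_emit(cprm, kaddr, PAGE_SIZE);

			kunmap(page);
			put_page(page);	/* drop the reference from get_dump_page() */
			if (stop)
				return 0;
		} else if (!dump_skip(cprm, PAGE_SIZE)) {
			/* NULL from get_dump_page() means "leave a hole" */
			return 0;
		}
	}
	return 1;
}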
1832
d1e153fe 1833#ifdef CONFIG_MIGRATION
f68749ec
PT
1834/*
 1835 * Check whether all pages are pinnable; if so, return the number of pages.
 1836 * If some pages are not pinnable, migrate them and unpin all of the pages.
 1837 * Return zero if pages were migrated, or if some pages were not successfully
 1838 * isolated. Return a negative error code if migration fails.
1839 */
1840static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1841 struct page **pages,
d1e153fe 1842 unsigned int gup_flags)
9a4e9f3b 1843{
f68749ec
PT
1844 unsigned long i;
1845 unsigned long isolation_error_count = 0;
1846 bool drain_allow = true;
d1e153fe 1847 LIST_HEAD(movable_page_list);
f68749ec
PT
1848 long ret = 0;
1849 struct page *prev_head = NULL;
1850 struct page *head;
ed03d924
JK
1851 struct migration_target_control mtc = {
1852 .nid = NUMA_NO_NODE,
c991ffef 1853 .gfp_mask = GFP_USER | __GFP_NOWARN,
ed03d924 1854 };
9a4e9f3b 1855
83c02c23
PT
1856 for (i = 0; i < nr_pages; i++) {
1857 head = compound_head(pages[i]);
1858 if (head == prev_head)
1859 continue;
1860 prev_head = head;
9a4e9f3b 1861 /*
d1e153fe
PT
 1862 * Since we are going to pin these entries, try to migrate
 1863 * any page that is not pinnable out of the way first.
9a4e9f3b 1864 */
d1e153fe 1865 if (!is_pinnable_page(head)) {
6e7f34eb 1866 if (PageHuge(head)) {
d1e153fe 1867 if (!isolate_huge_page(head, &movable_page_list))
6e7f34eb
PT
1868 isolation_error_count++;
1869 } else {
9a4e9f3b
AK
1870 if (!PageLRU(head) && drain_allow) {
1871 lru_add_drain_all();
1872 drain_allow = false;
1873 }
1874
6e7f34eb
PT
1875 if (isolate_lru_page(head)) {
1876 isolation_error_count++;
1877 continue;
9a4e9f3b 1878 }
d1e153fe 1879 list_add_tail(&head->lru, &movable_page_list);
6e7f34eb
PT
1880 mod_node_page_state(page_pgdat(head),
1881 NR_ISOLATED_ANON +
1882 page_is_file_lru(head),
1883 thp_nr_pages(head));
9a4e9f3b
AK
1884 }
1885 }
1886 }
1887
6e7f34eb
PT
1888 /*
 1889 * If the list is empty and there were no isolation errors, then all
 1890 * pages are in the correct zone.
1891 */
d1e153fe 1892 if (list_empty(&movable_page_list) && !isolation_error_count)
f68749ec 1893 return nr_pages;
6e7f34eb 1894
f68749ec
PT
1895 if (gup_flags & FOLL_PIN) {
1896 unpin_user_pages(pages, nr_pages);
1897 } else {
1898 for (i = 0; i < nr_pages; i++)
1899 put_page(pages[i]);
1900 }
d1e153fe 1901 if (!list_empty(&movable_page_list)) {
d1e153fe 1902 ret = migrate_pages(&movable_page_list, alloc_migration_target,
f0f44638 1903 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
5ac95884 1904 MR_LONGTERM_PIN, NULL);
f68749ec
PT
1905 if (ret && !list_empty(&movable_page_list))
1906 putback_movable_pages(&movable_page_list);
9a4e9f3b
AK
1907 }
1908
f68749ec 1909 return ret > 0 ? -ENOMEM : ret;
9a4e9f3b
AK
1910}
1911#else
f68749ec 1912static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1913 struct page **pages,
d1e153fe 1914 unsigned int gup_flags)
9a4e9f3b
AK
1915{
1916 return nr_pages;
1917}
d1e153fe 1918#endif /* CONFIG_MIGRATION */
9a4e9f3b 1919
2bb6d283 1920/*
932f4a63
IW
1921 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1922 * allows us to process the FOLL_LONGTERM flag.
2bb6d283 1923 */
64019a2e 1924static long __gup_longterm_locked(struct mm_struct *mm,
932f4a63
IW
1925 unsigned long start,
1926 unsigned long nr_pages,
1927 struct page **pages,
1928 struct vm_area_struct **vmas,
1929 unsigned int gup_flags)
2bb6d283 1930{
f68749ec 1931 unsigned int flags;
52650c8b 1932 long rc;
2bb6d283 1933
f68749ec
PT
1934 if (!(gup_flags & FOLL_LONGTERM))
1935 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1936 NULL, gup_flags);
1937 flags = memalloc_pin_save();
1938 do {
1939 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1940 NULL, gup_flags);
1941 if (rc <= 0)
1942 break;
1943 rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1944 } while (!rc);
1945 memalloc_pin_restore(flags);
2bb6d283 1946
2bb6d283
DW
1947 return rc;
1948}
932f4a63 1949
447f3e45
BS
1950static bool is_valid_gup_flags(unsigned int gup_flags)
1951{
1952 /*
1953 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1954 * never directly by the caller, so enforce that with an assertion:
1955 */
1956 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1957 return false;
1958 /*
 1959 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
 1960 * that is, FOLL_LONGTERM is a specific, more restrictive case of
 1961 * FOLL_PIN.
1962 */
1963 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1964 return false;
1965
1966 return true;
1967}
1968
22bf29b6 1969#ifdef CONFIG_MMU
64019a2e 1970static long __get_user_pages_remote(struct mm_struct *mm,
22bf29b6
JH
1971 unsigned long start, unsigned long nr_pages,
1972 unsigned int gup_flags, struct page **pages,
1973 struct vm_area_struct **vmas, int *locked)
1974{
1975 /*
1976 * Parts of FOLL_LONGTERM behavior are incompatible with
1977 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1978 * vmas. However, this only comes up if locked is set, and there are
1979 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1980 * allow what we can.
1981 */
1982 if (gup_flags & FOLL_LONGTERM) {
1983 if (WARN_ON_ONCE(locked))
1984 return -EINVAL;
1985 /*
1986 * This will check the vmas (even if our vmas arg is NULL)
1987 * and return -ENOTSUPP if DAX isn't allowed in this case:
1988 */
64019a2e 1989 return __gup_longterm_locked(mm, start, nr_pages, pages,
22bf29b6
JH
1990 vmas, gup_flags | FOLL_TOUCH |
1991 FOLL_REMOTE);
1992 }
1993
64019a2e 1994 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
22bf29b6
JH
1995 locked,
1996 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1997}
1998
adc8cb40 1999/**
c4237f8b 2000 * get_user_pages_remote() - pin user pages in memory
c4237f8b
JH
2001 * @mm: mm_struct of target mm
2002 * @start: starting user address
2003 * @nr_pages: number of pages from start to pin
2004 * @gup_flags: flags modifying lookup behaviour
2005 * @pages: array that receives pointers to the pages pinned.
2006 * Should be at least nr_pages long. Or NULL, if caller
2007 * only intends to ensure the pages are faulted in.
2008 * @vmas: array of pointers to vmas corresponding to each page.
2009 * Or NULL if the caller does not require them.
2010 * @locked: pointer to lock flag indicating whether lock is held and
2011 * subsequently whether VM_FAULT_RETRY functionality can be
2012 * utilised. Lock must initially be held.
2013 *
2014 * Returns either number of pages pinned (which may be less than the
2015 * number requested), or an error. Details about the return value:
2016 *
2017 * -- If nr_pages is 0, returns 0.
2018 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2019 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2020 * pages pinned. Again, this may be less than nr_pages.
2021 *
2022 * The caller is responsible for releasing returned @pages, via put_page().
2023 *
c1e8d7c6 2024 * @vmas are valid only as long as mmap_lock is held.
c4237f8b 2025 *
c1e8d7c6 2026 * Must be called with mmap_lock held for read or write.
c4237f8b 2027 *
adc8cb40
SJ
2028 * get_user_pages_remote walks a process's page tables and takes a reference
2029 * to each struct page that each user address corresponds to at a given
c4237f8b
JH
2030 * instant. That is, it takes the page that would be accessed if a user
2031 * thread accesses the given user virtual address at that instant.
2032 *
2033 * This does not guarantee that the page exists in the user mappings when
adc8cb40 2034 * get_user_pages_remote returns, and there may even be a completely different
c4237f8b
JH
 2035 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 2036 * and subsequently re-faulted). However, it does guarantee that the page
 2037 * won't be freed completely. Mostly, callers simply care that the page
2038 * contains data that was valid *at some point in time*. Typically, an IO
2039 * or similar operation cannot guarantee anything stronger anyway because
2040 * locks can't be held over the syscall boundary.
2041 *
2042 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2043 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2044 * be called after the page is finished with, and before put_page is called.
2045 *
adc8cb40
SJ
2046 * get_user_pages_remote is typically used for fewer-copy IO operations,
2047 * to get a handle on the memory by some means other than accesses
2048 * via the user virtual addresses. The pages may be submitted for
2049 * DMA to devices or accessed via their kernel linear mapping (via the
2050 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
c4237f8b
JH
2051 *
2052 * See also get_user_pages_fast, for performance critical applications.
2053 *
adc8cb40 2054 * get_user_pages_remote should be phased out in favor of
c4237f8b 2055 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
adc8cb40 2056 * should use get_user_pages_remote because it cannot pass
c4237f8b
JH
2057 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2058 */
64019a2e 2059long get_user_pages_remote(struct mm_struct *mm,
c4237f8b
JH
2060 unsigned long start, unsigned long nr_pages,
2061 unsigned int gup_flags, struct page **pages,
2062 struct vm_area_struct **vmas, int *locked)
2063{
447f3e45 2064 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2065 return -EINVAL;
2066
64019a2e 2067 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
22bf29b6 2068 pages, vmas, locked);
c4237f8b
JH
2069}
2070EXPORT_SYMBOL(get_user_pages_remote);
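/*
 * [Editor's sketch, not part of gup.c] Reading a few bytes from another
 * process, in the style of ptrace/access_remote_vm(). Assumes the caller
 * holds a reference on @mm (e.g. via get_task_mm()) and that @len does not
 * cross a page boundary. Hypothetical helper name.
 */
static int example_peek_remote(struct mm_struct *mm, unsigned long addr,
			       void *buf, int len)
{
	struct page *page;
	long ret;

	if (mmap_read_lock_killable(mm))
		return -EINTR;
	ret = get_user_pages_remote(mm, addr & PAGE_MASK, 1, FOLL_FORCE,
				    &page, NULL, NULL);
	mmap_read_unlock(mm);
	if (ret != 1)
		return -EFAULT;

	memcpy(buf, kmap(page) + offset_in_page(addr), len);
	kunmap(page);
	put_page(page);		/* release the GUP reference */
	return 0;
}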
2071
eddb1c22 2072#else /* CONFIG_MMU */
64019a2e 2073long get_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
2074 unsigned long start, unsigned long nr_pages,
2075 unsigned int gup_flags, struct page **pages,
2076 struct vm_area_struct **vmas, int *locked)
2077{
2078 return 0;
2079}
3faa52c0 2080
64019a2e 2081static long __get_user_pages_remote(struct mm_struct *mm,
3faa52c0
JH
2082 unsigned long start, unsigned long nr_pages,
2083 unsigned int gup_flags, struct page **pages,
2084 struct vm_area_struct **vmas, int *locked)
2085{
2086 return 0;
2087}
eddb1c22
JH
2088#endif /* !CONFIG_MMU */
2089
adc8cb40
SJ
2090/**
2091 * get_user_pages() - pin user pages in memory
2092 * @start: starting user address
2093 * @nr_pages: number of pages from start to pin
2094 * @gup_flags: flags modifying lookup behaviour
2095 * @pages: array that receives pointers to the pages pinned.
2096 * Should be at least nr_pages long. Or NULL, if caller
2097 * only intends to ensure the pages are faulted in.
2098 * @vmas: array of pointers to vmas corresponding to each page.
2099 * Or NULL if the caller does not require them.
2100 *
64019a2e
PX
2101 * This is the same as get_user_pages_remote(), just with a less-flexible
2102 * calling convention where we assume that the mm being operated on belongs to
2103 * the current task, and doesn't allow passing of a locked parameter. We also
2104 * obviously don't pass FOLL_REMOTE in here.
932f4a63
IW
2105 */
2106long get_user_pages(unsigned long start, unsigned long nr_pages,
2107 unsigned int gup_flags, struct page **pages,
2108 struct vm_area_struct **vmas)
2109{
447f3e45 2110 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2111 return -EINVAL;
2112
64019a2e 2113 return __gup_longterm_locked(current->mm, start, nr_pages,
932f4a63
IW
2114 pages, vmas, gup_flags | FOLL_TOUCH);
2115}
2116EXPORT_SYMBOL(get_user_pages);
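/*
 * [Editor's sketch, not part of gup.c] The canonical calling pattern:
 * take mmap_lock, pin, drop the lock, use the pages, then dirty and
 * release them in that order (see the FOLL_WRITE rule in the comment
 * above get_user_pages_remote()). Hypothetical example_ name.
 */
static long example_gup_write(unsigned long start, unsigned long nr_pages,
			      struct page **pages)
{
	long pinned, i;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);
	if (pinned < 0)
		return pinned;

	for (i = 0; i < pinned; i++) {
		/* ... modify page contents, e.g. via kmap() or DMA ... */
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned == nr_pages ? 0 : -EFAULT;
}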
2bb6d283 2117
adc8cb40 2118/**
a00cda3f
MCC
2119 * get_user_pages_locked() - variant of get_user_pages()
2120 *
2121 * @start: starting user address
2122 * @nr_pages: number of pages from start to pin
2123 * @gup_flags: flags modifying lookup behaviour
2124 * @pages: array that receives pointers to the pages pinned.
2125 * Should be at least nr_pages long. Or NULL, if caller
2126 * only intends to ensure the pages are faulted in.
2127 * @locked: pointer to lock flag indicating whether lock is held and
2128 * subsequently whether VM_FAULT_RETRY functionality can be
2129 * utilised. Lock must initially be held.
2130 *
2131 * It is suitable to replace the form:
acc3c8d1 2132 *
3e4e28c5 2133 * mmap_read_lock(mm);
d3649f68 2134 * do_something()
64019a2e 2135 * get_user_pages(..., pages, NULL);
3e4e28c5 2136 * mmap_read_unlock(mm);
acc3c8d1 2137 *
d3649f68 2138 * to:
acc3c8d1 2139 *
d3649f68 2140 * int locked = 1;
3e4e28c5 2141 * mmap_read_lock(mm);
d3649f68 2142 * do_something()
64019a2e 2143 * get_user_pages_locked(..., pages, &locked);
d3649f68 2144 * if (locked)
3e4e28c5 2145 * mmap_read_unlock(mm);
adc8cb40 2146 *
adc8cb40
SJ
2147 * We can leverage the VM_FAULT_RETRY functionality in the page fault
2148 * paths better by using either get_user_pages_locked() or
2149 * get_user_pages_unlocked().
2150 *
acc3c8d1 2151 */
d3649f68
CH
2152long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
2153 unsigned int gup_flags, struct page **pages,
2154 int *locked)
acc3c8d1 2155{
acc3c8d1 2156 /*
d3649f68
CH
2157 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2158 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2159 * vmas. As there are no users of this flag in this call we simply
2160 * disallow this option for now.
acc3c8d1 2161 */
d3649f68
CH
2162 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2163 return -EINVAL;
420c2091
JH
2164 /*
2165 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
2166 * never directly by the caller, so enforce that:
2167 */
2168 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
2169 return -EINVAL;
acc3c8d1 2170
64019a2e 2171 return __get_user_pages_locked(current->mm, start, nr_pages,
d3649f68
CH
2172 pages, NULL, locked,
2173 gup_flags | FOLL_TOUCH);
acc3c8d1 2174}
d3649f68 2175EXPORT_SYMBOL(get_user_pages_locked);
acc3c8d1
KS
2176
2177/*
d3649f68 2178 * get_user_pages_unlocked() is suitable to replace the form:
acc3c8d1 2179 *
3e4e28c5 2180 * mmap_read_lock(mm);
64019a2e 2181 * get_user_pages(..., pages, NULL);
3e4e28c5 2182 * mmap_read_unlock(mm);
d3649f68
CH
2183 *
2184 * with:
2185 *
64019a2e 2186 * get_user_pages_unlocked(..., pages);
d3649f68
CH
2187 *
 2188 * It is functionally equivalent to get_user_pages_fast, so
2189 * get_user_pages_fast should be used instead if specific gup_flags
2190 * (e.g. FOLL_FORCE) are not required.
acc3c8d1 2191 */
d3649f68
CH
2192long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2193 struct page **pages, unsigned int gup_flags)
acc3c8d1
KS
2194{
2195 struct mm_struct *mm = current->mm;
d3649f68
CH
2196 int locked = 1;
2197 long ret;
acc3c8d1 2198
d3649f68
CH
2199 /*
2200 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2201 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2202 * vmas. As there are no users of this flag in this call we simply
2203 * disallow this option for now.
2204 */
2205 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2206 return -EINVAL;
acc3c8d1 2207
d8ed45c5 2208 mmap_read_lock(mm);
64019a2e 2209 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
d3649f68 2210 &locked, gup_flags | FOLL_TOUCH);
acc3c8d1 2211 if (locked)
d8ed45c5 2212 mmap_read_unlock(mm);
d3649f68 2213 return ret;
4bbd4c77 2214}
d3649f68 2215EXPORT_SYMBOL(get_user_pages_unlocked);
2667f50e
SC
2216
2217/*
67a929e0 2218 * Fast GUP
2667f50e
SC
2219 *
2220 * get_user_pages_fast attempts to pin user pages by walking the page
2221 * tables directly and avoids taking locks. Thus the walker needs to be
2222 * protected from page table pages being freed from under it, and should
2223 * block any THP splits.
2224 *
2225 * One way to achieve this is to have the walker disable interrupts, and
2226 * rely on IPIs from the TLB flushing code blocking before the page table
2227 * pages are freed. This is unsuitable for architectures that do not need
2228 * to broadcast an IPI when invalidating TLBs.
2229 *
 2230 * Another way to achieve this is to batch up the pages containing page
 2231 * tables belonging to more than one mm_user, then rcu_sched a callback to
 2232 * free those pages. Disabling interrupts will allow the fast_gup walker to
 2233 * block both the rcu_sched callback and the IPI that we broadcast for
 2234 * splitting THPs (which is a relatively rare event). The code below adopts this strategy.
2235 *
2236 * Before activating this code, please be aware that the following assumptions
2237 * are currently made:
2238 *
ff2e6d72 2239 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
e585513b 2240 * free pages containing page tables or TLB flushing requires IPI broadcast.
2667f50e 2241 *
2667f50e
SC
2242 * *) ptes can be read atomically by the architecture.
2243 *
2244 * *) access_ok is sufficient to validate userspace address ranges.
2245 *
2246 * The last two assumptions can be relaxed by the addition of helper functions.
2247 *
2248 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2249 */
67a929e0 2250#ifdef CONFIG_HAVE_FAST_GUP
3faa52c0 2251
790c7369 2252static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
3b78d834 2253 unsigned int flags,
790c7369 2254 struct page **pages)
b59f65fa
KS
2255{
2256 while ((*nr) - nr_start) {
2257 struct page *page = pages[--(*nr)];
2258
2259 ClearPageReferenced(page);
3faa52c0
JH
2260 if (flags & FOLL_PIN)
2261 unpin_user_page(page);
2262 else
2263 put_page(page);
b59f65fa
KS
2264 }
2265}
2266
3010a5ea 2267#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2667f50e 2268static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2269 unsigned int flags, struct page **pages, int *nr)
2667f50e 2270{
b59f65fa
KS
2271 struct dev_pagemap *pgmap = NULL;
2272 int nr_start = *nr, ret = 0;
2667f50e 2273 pte_t *ptep, *ptem;
2667f50e
SC
2274
2275 ptem = ptep = pte_offset_map(&pmd, addr);
2276 do {
2a4a06da 2277 pte_t pte = ptep_get_lockless(ptep);
7aef4172 2278 struct page *head, *page;
2667f50e
SC
2279
2280 /*
2281 * Similar to the PMD case below, NUMA hinting must take slow
8a0516ed 2282 * path using the pte_protnone check.
2667f50e 2283 */
e7884f8e
KS
2284 if (pte_protnone(pte))
2285 goto pte_unmap;
2286
b798bec4 2287 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
e7884f8e
KS
2288 goto pte_unmap;
2289
b59f65fa 2290 if (pte_devmap(pte)) {
7af75561
IW
2291 if (unlikely(flags & FOLL_LONGTERM))
2292 goto pte_unmap;
2293
b59f65fa
KS
2294 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2295 if (unlikely(!pgmap)) {
3b78d834 2296 undo_dev_pagemap(nr, nr_start, flags, pages);
b59f65fa
KS
2297 goto pte_unmap;
2298 }
2299 } else if (pte_special(pte))
2667f50e
SC
2300 goto pte_unmap;
2301
2302 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2303 page = pte_page(pte);
2304
3faa52c0 2305 head = try_grab_compound_head(page, 1, flags);
8fde12ca 2306 if (!head)
2667f50e
SC
2307 goto pte_unmap;
2308
1507f512
MR
2309 if (unlikely(page_is_secretmem(page))) {
2310 put_compound_head(head, 1, flags);
2311 goto pte_unmap;
2312 }
2313
2667f50e 2314 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
3faa52c0 2315 put_compound_head(head, 1, flags);
2667f50e
SC
2316 goto pte_unmap;
2317 }
2318
7aef4172 2319 VM_BUG_ON_PAGE(compound_head(page) != head, page);
e9348053 2320
f28d4363
CI
2321 /*
2322 * We need to make the page accessible if and only if we are
2323 * going to access its content (the FOLL_PIN case). Please
2324 * see Documentation/core-api/pin_user_pages.rst for
2325 * details.
2326 */
2327 if (flags & FOLL_PIN) {
2328 ret = arch_make_page_accessible(page);
2329 if (ret) {
2330 unpin_user_page(page);
2331 goto pte_unmap;
2332 }
2333 }
e9348053 2334 SetPageReferenced(page);
2667f50e
SC
2335 pages[*nr] = page;
2336 (*nr)++;
2337
2338 } while (ptep++, addr += PAGE_SIZE, addr != end);
2339
2340 ret = 1;
2341
2342pte_unmap:
832d7aa0
CH
2343 if (pgmap)
2344 put_dev_pagemap(pgmap);
2667f50e
SC
2345 pte_unmap(ptem);
2346 return ret;
2347}
2348#else
2349
2350/*
2351 * If we can't determine whether or not a pte is special, then fail immediately
2352 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2353 * to be special.
2354 *
2355 * For a futex to be placed on a THP tail page, get_futex_key requires a
dadbb612 2356 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2667f50e
SC
2357 * useful to have gup_huge_pmd even if we can't operate on ptes.
2358 */
2359static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2360 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2361{
2362 return 0;
2363}
3010a5ea 2364#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2667f50e 2365
17596731 2366#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
b59f65fa 2367static int __gup_device_huge(unsigned long pfn, unsigned long addr,
86dfbed4
JH
2368 unsigned long end, unsigned int flags,
2369 struct page **pages, int *nr)
b59f65fa
KS
2370{
2371 int nr_start = *nr;
2372 struct dev_pagemap *pgmap = NULL;
2373
2374 do {
2375 struct page *page = pfn_to_page(pfn);
2376
2377 pgmap = get_dev_pagemap(pfn, pgmap);
2378 if (unlikely(!pgmap)) {
3b78d834 2379 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb 2380 break;
b59f65fa
KS
2381 }
2382 SetPageReferenced(page);
2383 pages[*nr] = page;
3faa52c0
JH
2384 if (unlikely(!try_grab_page(page, flags))) {
2385 undo_dev_pagemap(nr, nr_start, flags, pages);
6401c4eb 2386 break;
3faa52c0 2387 }
b59f65fa
KS
2388 (*nr)++;
2389 pfn++;
2390 } while (addr += PAGE_SIZE, addr != end);
832d7aa0 2391
6401c4eb 2392 put_dev_pagemap(pgmap);
20b7fee7 2393 return addr == end;
b59f65fa
KS
2394}
2395
a9b6de77 2396static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2397 unsigned long end, unsigned int flags,
2398 struct page **pages, int *nr)
b59f65fa
KS
2399{
2400 unsigned long fault_pfn;
a9b6de77
DW
2401 int nr_start = *nr;
2402
2403 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
86dfbed4 2404 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2405 return 0;
b59f65fa 2406
a9b6de77 2407 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2408 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2409 return 0;
2410 }
2411 return 1;
b59f65fa
KS
2412}
2413
a9b6de77 2414static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2415 unsigned long end, unsigned int flags,
2416 struct page **pages, int *nr)
b59f65fa
KS
2417{
2418 unsigned long fault_pfn;
a9b6de77
DW
2419 int nr_start = *nr;
2420
2421 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
86dfbed4 2422 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2423 return 0;
b59f65fa 2424
a9b6de77 2425 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2426 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2427 return 0;
2428 }
2429 return 1;
b59f65fa
KS
2430}
2431#else
a9b6de77 2432static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2433 unsigned long end, unsigned int flags,
2434 struct page **pages, int *nr)
b59f65fa
KS
2435{
2436 BUILD_BUG();
2437 return 0;
2438}
2439
a9b6de77 2440static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2441 unsigned long end, unsigned int flags,
2442 struct page **pages, int *nr)
b59f65fa
KS
2443{
2444 BUILD_BUG();
2445 return 0;
2446}
2447#endif
2448
a43e9820
JH
2449static int record_subpages(struct page *page, unsigned long addr,
2450 unsigned long end, struct page **pages)
2451{
2452 int nr;
2453
2454 for (nr = 0; addr != end; addr += PAGE_SIZE)
2455 pages[nr++] = page++;
2456
2457 return nr;
2458}
2459
cbd34da7
CH
2460#ifdef CONFIG_ARCH_HAS_HUGEPD
2461static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2462 unsigned long sz)
2463{
2464 unsigned long __boundary = (addr + sz) & ~(sz-1);
2465 return (__boundary - 1 < end - 1) ? __boundary : end;
2466}
2467
2468static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
0cd22afd
JH
2469 unsigned long end, unsigned int flags,
2470 struct page **pages, int *nr)
cbd34da7
CH
2471{
2472 unsigned long pte_end;
2473 struct page *head, *page;
2474 pte_t pte;
2475 int refs;
2476
2477 pte_end = (addr + sz) & ~(sz-1);
2478 if (pte_end < end)
2479 end = pte_end;
2480
55ca2263 2481 pte = huge_ptep_get(ptep);
cbd34da7 2482
0cd22afd 2483 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
cbd34da7
CH
2484 return 0;
2485
2486 /* hugepages are never "special" */
2487 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2488
cbd34da7 2489 head = pte_page(pte);
cbd34da7 2490 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
a43e9820 2491 refs = record_subpages(page, addr, end, pages + *nr);
cbd34da7 2492
3faa52c0 2493 head = try_grab_compound_head(head, refs, flags);
a43e9820 2494 if (!head)
cbd34da7 2495 return 0;
cbd34da7
CH
2496
2497 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
3b78d834 2498 put_compound_head(head, refs, flags);
cbd34da7
CH
2499 return 0;
2500 }
2501
a43e9820 2502 *nr += refs;
520b4a44 2503 SetPageReferenced(head);
cbd34da7
CH
2504 return 1;
2505}
2506
2507static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2508 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2509 struct page **pages, int *nr)
2510{
2511 pte_t *ptep;
2512 unsigned long sz = 1UL << hugepd_shift(hugepd);
2513 unsigned long next;
2514
2515 ptep = hugepte_offset(hugepd, addr, pdshift);
2516 do {
2517 next = hugepte_addr_end(addr, end, sz);
0cd22afd 2518 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
cbd34da7
CH
2519 return 0;
2520 } while (ptep++, addr = next, addr != end);
2521
2522 return 1;
2523}
2524#else
2525static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2526 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2527 struct page **pages, int *nr)
2528{
2529 return 0;
2530}
2531#endif /* CONFIG_ARCH_HAS_HUGEPD */
2532
2667f50e 2533static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
0cd22afd
JH
2534 unsigned long end, unsigned int flags,
2535 struct page **pages, int *nr)
2667f50e 2536{
ddc58f27 2537 struct page *head, *page;
2667f50e
SC
2538 int refs;
2539
b798bec4 2540 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2541 return 0;
2542
7af75561
IW
2543 if (pmd_devmap(orig)) {
2544 if (unlikely(flags & FOLL_LONGTERM))
2545 return 0;
86dfbed4
JH
2546 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2547 pages, nr);
7af75561 2548 }
b59f65fa 2549
d63206ee 2550 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
a43e9820 2551 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2552
3faa52c0 2553 head = try_grab_compound_head(pmd_page(orig), refs, flags);
a43e9820 2554 if (!head)
2667f50e 2555 return 0;
2667f50e
SC
2556
2557 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2558 put_compound_head(head, refs, flags);
2667f50e
SC
2559 return 0;
2560 }
2561
a43e9820 2562 *nr += refs;
e9348053 2563 SetPageReferenced(head);
2667f50e
SC
2564 return 1;
2565}
2566
2567static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2568 unsigned long end, unsigned int flags,
2569 struct page **pages, int *nr)
2667f50e 2570{
ddc58f27 2571 struct page *head, *page;
2667f50e
SC
2572 int refs;
2573
b798bec4 2574 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2575 return 0;
2576
7af75561
IW
2577 if (pud_devmap(orig)) {
2578 if (unlikely(flags & FOLL_LONGTERM))
2579 return 0;
86dfbed4
JH
2580 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2581 pages, nr);
7af75561 2582 }
b59f65fa 2583
d63206ee 2584 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
a43e9820 2585 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2586
3faa52c0 2587 head = try_grab_compound_head(pud_page(orig), refs, flags);
a43e9820 2588 if (!head)
2667f50e 2589 return 0;
2667f50e
SC
2590
2591 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2592 put_compound_head(head, refs, flags);
2667f50e
SC
2593 return 0;
2594 }
2595
a43e9820 2596 *nr += refs;
e9348053 2597 SetPageReferenced(head);
2667f50e
SC
2598 return 1;
2599}
2600
f30c59e9 2601static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
b798bec4 2602 unsigned long end, unsigned int flags,
f30c59e9
AK
2603 struct page **pages, int *nr)
2604{
2605 int refs;
ddc58f27 2606 struct page *head, *page;
f30c59e9 2607
b798bec4 2608 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
f30c59e9
AK
2609 return 0;
2610
b59f65fa 2611 BUILD_BUG_ON(pgd_devmap(orig));
a43e9820 2612
d63206ee 2613 page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
a43e9820 2614 refs = record_subpages(page, addr, end, pages + *nr);
f30c59e9 2615
3faa52c0 2616 head = try_grab_compound_head(pgd_page(orig), refs, flags);
a43e9820 2617 if (!head)
f30c59e9 2618 return 0;
f30c59e9
AK
2619
2620 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
3b78d834 2621 put_compound_head(head, refs, flags);
f30c59e9
AK
2622 return 0;
2623 }
2624
a43e9820 2625 *nr += refs;
e9348053 2626 SetPageReferenced(head);
f30c59e9
AK
2627 return 1;
2628}
2629
d3f7b1bb 2630static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
b798bec4 2631 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2632{
2633 unsigned long next;
2634 pmd_t *pmdp;
2635
d3f7b1bb 2636 pmdp = pmd_offset_lockless(pudp, pud, addr);
2667f50e 2637 do {
38c5ce93 2638 pmd_t pmd = READ_ONCE(*pmdp);
2667f50e
SC
2639
2640 next = pmd_addr_end(addr, end);
84c3fc4e 2641 if (!pmd_present(pmd))
2667f50e
SC
2642 return 0;
2643
414fd080
YZ
2644 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2645 pmd_devmap(pmd))) {
2667f50e
SC
2646 /*
2647 * NUMA hinting faults need to be handled in the GUP
2648 * slowpath for accounting purposes and so that they
2649 * can be serialised against THP migration.
2650 */
8a0516ed 2651 if (pmd_protnone(pmd))
2667f50e
SC
2652 return 0;
2653
b798bec4 2654 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2667f50e
SC
2655 pages, nr))
2656 return 0;
2657
f30c59e9
AK
2658 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2659 /*
 2660 * Architectures may use a different format for the
 2661 * hugetlbfs pmd than for the THP pmd.
2662 */
2663 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
b798bec4 2664 PMD_SHIFT, next, flags, pages, nr))
f30c59e9 2665 return 0;
b798bec4 2666 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2923117b 2667 return 0;
2667f50e
SC
2668 } while (pmdp++, addr = next, addr != end);
2669
2670 return 1;
2671}
2672
d3f7b1bb 2673static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
b798bec4 2674 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2675{
2676 unsigned long next;
2677 pud_t *pudp;
2678
d3f7b1bb 2679 pudp = pud_offset_lockless(p4dp, p4d, addr);
2667f50e 2680 do {
e37c6982 2681 pud_t pud = READ_ONCE(*pudp);
2667f50e
SC
2682
2683 next = pud_addr_end(addr, end);
15494520 2684 if (unlikely(!pud_present(pud)))
2667f50e 2685 return 0;
f30c59e9 2686 if (unlikely(pud_huge(pud))) {
b798bec4 2687 if (!gup_huge_pud(pud, pudp, addr, next, flags,
f30c59e9
AK
2688 pages, nr))
2689 return 0;
2690 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2691 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
b798bec4 2692 PUD_SHIFT, next, flags, pages, nr))
2667f50e 2693 return 0;
d3f7b1bb 2694 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2667f50e
SC
2695 return 0;
2696 } while (pudp++, addr = next, addr != end);
2697
2698 return 1;
2699}
2700
d3f7b1bb 2701static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
b798bec4 2702 unsigned int flags, struct page **pages, int *nr)
c2febafc
KS
2703{
2704 unsigned long next;
2705 p4d_t *p4dp;
2706
d3f7b1bb 2707 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
c2febafc
KS
2708 do {
2709 p4d_t p4d = READ_ONCE(*p4dp);
2710
2711 next = p4d_addr_end(addr, end);
2712 if (p4d_none(p4d))
2713 return 0;
2714 BUILD_BUG_ON(p4d_huge(p4d));
2715 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2716 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
b798bec4 2717 P4D_SHIFT, next, flags, pages, nr))
c2febafc 2718 return 0;
d3f7b1bb 2719 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
c2febafc
KS
2720 return 0;
2721 } while (p4dp++, addr = next, addr != end);
2722
2723 return 1;
2724}
2725
5b65c467 2726static void gup_pgd_range(unsigned long addr, unsigned long end,
b798bec4 2727 unsigned int flags, struct page **pages, int *nr)
5b65c467
KS
2728{
2729 unsigned long next;
2730 pgd_t *pgdp;
2731
2732 pgdp = pgd_offset(current->mm, addr);
2733 do {
2734 pgd_t pgd = READ_ONCE(*pgdp);
2735
2736 next = pgd_addr_end(addr, end);
2737 if (pgd_none(pgd))
2738 return;
2739 if (unlikely(pgd_huge(pgd))) {
b798bec4 2740 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
5b65c467
KS
2741 pages, nr))
2742 return;
2743 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2744 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
b798bec4 2745 PGDIR_SHIFT, next, flags, pages, nr))
5b65c467 2746 return;
d3f7b1bb 2747 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
5b65c467
KS
2748 return;
2749 } while (pgdp++, addr = next, addr != end);
2750}
050a9adc
CH
2751#else
2752static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2753 unsigned int flags, struct page **pages, int *nr)
2754{
2755}
2756#endif /* CONFIG_HAVE_FAST_GUP */
5b65c467
KS
2757
2758#ifndef gup_fast_permitted
2759/*
dadbb612 2760 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
5b65c467
KS
2761 * we need to fall back to the slow version:
2762 */
26f4c328 2763static bool gup_fast_permitted(unsigned long start, unsigned long end)
5b65c467 2764{
26f4c328 2765 return true;
5b65c467
KS
2766}
2767#endif
2768
7af75561
IW
2769static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2770 unsigned int gup_flags, struct page **pages)
2771{
2772 int ret;
2773
2774 /*
2775 * FIXME: FOLL_LONGTERM does not work with
2776 * get_user_pages_unlocked() (see comments in that function)
2777 */
2778 if (gup_flags & FOLL_LONGTERM) {
d8ed45c5 2779 mmap_read_lock(current->mm);
64019a2e 2780 ret = __gup_longterm_locked(current->mm,
7af75561
IW
2781 start, nr_pages,
2782 pages, NULL, gup_flags);
d8ed45c5 2783 mmap_read_unlock(current->mm);
7af75561
IW
2784 } else {
2785 ret = get_user_pages_unlocked(start, nr_pages,
2786 pages, gup_flags);
2787 }
2788
2789 return ret;
2790}
2791
c28b1fc7
JG
2792static unsigned long lockless_pages_from_mm(unsigned long start,
2793 unsigned long end,
2794 unsigned int gup_flags,
2795 struct page **pages)
2796{
2797 unsigned long flags;
2798 int nr_pinned = 0;
57efa1fe 2799 unsigned seq;
c28b1fc7
JG
2800
2801 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2802 !gup_fast_permitted(start, end))
2803 return 0;
2804
57efa1fe
JG
2805 if (gup_flags & FOLL_PIN) {
2806 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2807 if (seq & 1)
2808 return 0;
2809 }
2810
c28b1fc7
JG
2811 /*
 2812 * Disable interrupts. The nested form is used in order to allow full,
2813 * general purpose use of this routine.
2814 *
2815 * With interrupts disabled, we block page table pages from being freed
2816 * from under us. See struct mmu_table_batch comments in
2817 * include/asm-generic/tlb.h for more details.
2818 *
2819 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2820 * that come from THPs splitting.
2821 */
2822 local_irq_save(flags);
2823 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2824 local_irq_restore(flags);
57efa1fe
JG
2825
2826 /*
2827 * When pinning pages for DMA there could be a concurrent write protect
 2828 * from fork() via copy_page_range(); in this case, always fail fast GUP.
2829 */
2830 if (gup_flags & FOLL_PIN) {
2831 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2832 unpin_user_pages(pages, nr_pinned);
2833 return 0;
2834 }
2835 }
c28b1fc7
JG
2836 return nr_pinned;
2837}
2838
2839static int internal_get_user_pages_fast(unsigned long start,
2840 unsigned long nr_pages,
eddb1c22
JH
2841 unsigned int gup_flags,
2842 struct page **pages)
2667f50e 2843{
c28b1fc7
JG
2844 unsigned long len, end;
2845 unsigned long nr_pinned;
2846 int ret;
2667f50e 2847
f4000fdf 2848 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
376a34ef 2849 FOLL_FORCE | FOLL_PIN | FOLL_GET |
55b8fe70 2850 FOLL_FAST_ONLY | FOLL_NOFAULT)))
817be129
CH
2851 return -EINVAL;
2852
a458b76a
AA
2853 if (gup_flags & FOLL_PIN)
2854 mm_set_has_pinned_flag(&current->mm->flags);
008cfe44 2855
f81cd178 2856 if (!(gup_flags & FOLL_FAST_ONLY))
da1c55f1 2857 might_lock_read(&current->mm->mmap_lock);
f81cd178 2858
f455c854 2859 start = untagged_addr(start) & PAGE_MASK;
c28b1fc7
JG
2860 len = nr_pages << PAGE_SHIFT;
2861 if (check_add_overflow(start, len, &end))
c61611f7 2862 return 0;
96d4f267 2863 if (unlikely(!access_ok((void __user *)start, len)))
c61611f7 2864 return -EFAULT;
73e10a61 2865
c28b1fc7
JG
2866 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2867 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2868 return nr_pinned;
2667f50e 2869
c28b1fc7
JG
2870 /* Slow path: try to get the remaining pages with get_user_pages */
2871 start += nr_pinned << PAGE_SHIFT;
2872 pages += nr_pinned;
2873 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2874 pages);
2875 if (ret < 0) {
2876 /*
2877 * The caller has to unpin the pages we already pinned so
2878 * returning -errno is not an option
2879 */
2880 if (nr_pinned)
2881 return nr_pinned;
2882 return ret;
2667f50e 2883 }
c28b1fc7 2884 return ret + nr_pinned;
2667f50e 2885}
c28b1fc7 2886
dadbb612
SJ
2887/**
2888 * get_user_pages_fast_only() - pin user pages in memory
2889 * @start: starting user address
2890 * @nr_pages: number of pages from start to pin
2891 * @gup_flags: flags modifying pin behaviour
2892 * @pages: array that receives pointers to the pages pinned.
2893 * Should be at least nr_pages long.
2894 *
9e1f0580
JH
2895 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2896 * the regular GUP.
 2897 * Note a difference from get_user_pages_fast: this always returns the
 2898 * number of pages pinned, or 0 if no pages were pinned.
2899 *
2900 * If the architecture does not support this function, simply return with no
2901 * pages pinned.
2902 *
2903 * Careful, careful! COW breaking can go either way, so a non-write
2904 * access can get ambiguous page results. If you call this function without
2905 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2906 */
dadbb612
SJ
2907int get_user_pages_fast_only(unsigned long start, int nr_pages,
2908 unsigned int gup_flags, struct page **pages)
9e1f0580 2909{
376a34ef 2910 int nr_pinned;
9e1f0580
JH
2911 /*
2912 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2913 * because gup fast is always a "pin with a +1 page refcount" request.
376a34ef
JH
2914 *
2915 * FOLL_FAST_ONLY is required in order to match the API description of
2916 * this routine: no fall back to regular ("slow") GUP.
9e1f0580 2917 */
dadbb612 2918 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
9e1f0580 2919
376a34ef
JH
2920 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2921 pages);
9e1f0580
JH
2922
2923 /*
376a34ef
JH
2924 * As specified in the API description above, this routine is not
2925 * allowed to return negative values. However, the common core
2926 * routine internal_get_user_pages_fast() *can* return -errno.
2927 * Therefore, correct for that here:
9e1f0580 2928 */
376a34ef
JH
2929 if (nr_pinned < 0)
2930 nr_pinned = 0;
9e1f0580
JH
2931
2932 return nr_pinned;
2933}
dadbb612 2934EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
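/*
 * [Editor's sketch, not part of gup.c] The "try fast, fall back to slow"
 * pattern (compare KVM's hva_to_pfn()): get_user_pages_fast_only() never
 * sleeps, so it can be attempted from atomic context, with the caller
 * falling back to sleeping GUP on failure. Hypothetical helper name.
 */
static struct page *example_try_fast(unsigned long addr, bool write)
{
	struct page *page;

	if (get_user_pages_fast_only(addr, 1, write ? FOLL_WRITE : 0,
				     &page) == 1)
		return page;	/* holds a +1 reference; put_page() when done */
	return NULL;		/* caller retries with a sleeping GUP variant */
}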
9e1f0580 2935
eddb1c22
JH
2936/**
2937 * get_user_pages_fast() - pin user pages in memory
3faa52c0
JH
2938 * @start: starting user address
2939 * @nr_pages: number of pages from start to pin
2940 * @gup_flags: flags modifying pin behaviour
2941 * @pages: array that receives pointers to the pages pinned.
2942 * Should be at least nr_pages long.
eddb1c22 2943 *
c1e8d7c6 2944 * Attempt to pin user pages in memory without taking mm->mmap_lock.
eddb1c22
JH
2945 * If not successful, it will fall back to taking the lock and
2946 * calling get_user_pages().
2947 *
2948 * Returns number of pages pinned. This may be fewer than the number requested.
2949 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2950 * -errno.
2951 */
2952int get_user_pages_fast(unsigned long start, int nr_pages,
2953 unsigned int gup_flags, struct page **pages)
2954{
447f3e45 2955 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2956 return -EINVAL;
2957
94202f12
JH
2958 /*
2959 * The caller may or may not have explicitly set FOLL_GET; either way is
2960 * OK. However, internally (within mm/gup.c), gup fast variants must set
2961 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2962 * request.
2963 */
2964 gup_flags |= FOLL_GET;
eddb1c22
JH
2965 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2966}
050a9adc 2967EXPORT_SYMBOL_GPL(get_user_pages_fast);
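/*
 * [Editor's sketch, not part of gup.c] A direct-IO style caller: no
 * mmap_lock is taken at the call site, and FOLL_WRITE is set because the
 * device will write into the user buffer (a read(2) request). Names are
 * hypothetical.
 */
static int example_pin_for_read(unsigned long uaddr, int nr_pages,
				struct page **pages)
{
	int pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

	if (pinned < 0)
		return pinned;	/* nothing pinned: plain -errno */
	/* ... build a scatter-gather list over pages[0..pinned) ... */
	return pinned;		/* caller must put_page() each entry later */
}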
eddb1c22
JH
2968
2969/**
2970 * pin_user_pages_fast() - pin user pages in memory without taking locks
2971 *
3faa52c0
JH
2972 * @start: starting user address
2973 * @nr_pages: number of pages from start to pin
2974 * @gup_flags: flags modifying pin behaviour
2975 * @pages: array that receives pointers to the pages pinned.
2976 * Should be at least nr_pages long.
2977 *
2978 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2979 * get_user_pages_fast() for documentation on the function arguments, because
2980 * the arguments here are identical.
2981 *
2982 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2983 * see Documentation/core-api/pin_user_pages.rst for further details.
eddb1c22
JH
2984 */
2985int pin_user_pages_fast(unsigned long start, int nr_pages,
2986 unsigned int gup_flags, struct page **pages)
2987{
3faa52c0
JH
2988 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2989 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2990 return -EINVAL;
2991
2992 gup_flags |= FOLL_PIN;
2993 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
eddb1c22
JH
2994}
2995EXPORT_SYMBOL_GPL(pin_user_pages_fast);
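/*
 * [Editor's sketch, not part of gup.c] The FOLL_PIN lifecycle: pages
 * obtained from the pin_user_pages*() APIs must be released via
 * unpin_user_page() or unpin_user_pages_dirty_lock(), never put_page().
 * Hypothetical example_ name.
 */
static int example_dma_roundtrip(unsigned long uaddr, int nr_pages,
				 struct page **pages)
{
	int pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

	if (pinned < 0)
		return pinned;
	/* ... device DMAs into the pinned pages ... */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return 0;
}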
2996
104acc32 2997/*
dadbb612
SJ
2998 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2999 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
104acc32
JH
3000 *
3001 * The API rules are the same, too: no negative values may be returned.
3002 */
3003int pin_user_pages_fast_only(unsigned long start, int nr_pages,
3004 unsigned int gup_flags, struct page **pages)
3005{
3006 int nr_pinned;
3007
3008 /*
3009 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
3010 * rules require returning 0, rather than -errno:
3011 */
3012 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3013 return 0;
3014 /*
3015 * FOLL_FAST_ONLY is required in order to match the API description of
3016 * this routine: no fall back to regular ("slow") GUP.
3017 */
3018 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
3019 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
3020 pages);
3021 /*
3022 * This routine is not allowed to return negative values. However,
3023 * internal_get_user_pages_fast() *can* return -errno. Therefore,
3024 * correct for that here:
3025 */
3026 if (nr_pinned < 0)
3027 nr_pinned = 0;
3028
3029 return nr_pinned;
3030}
3031EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
3032
eddb1c22 3033/**
64019a2e 3034 * pin_user_pages_remote() - pin pages of a remote process
eddb1c22 3035 *
3faa52c0
JH
3036 * @mm: mm_struct of target mm
3037 * @start: starting user address
3038 * @nr_pages: number of pages from start to pin
3039 * @gup_flags: flags modifying lookup behaviour
3040 * @pages: array that receives pointers to the pages pinned.
3041 * Should be at least nr_pages long. Or NULL, if caller
3042 * only intends to ensure the pages are faulted in.
3043 * @vmas: array of pointers to vmas corresponding to each page.
3044 * Or NULL if the caller does not require them.
3045 * @locked: pointer to lock flag indicating whether lock is held and
3046 * subsequently whether VM_FAULT_RETRY functionality can be
3047 * utilised. Lock must initially be held.
3048 *
3049 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3050 * get_user_pages_remote() for documentation on the function arguments, because
3051 * the arguments here are identical.
3052 *
3053 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3054 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22 3055 */
64019a2e 3056long pin_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
3057 unsigned long start, unsigned long nr_pages,
3058 unsigned int gup_flags, struct page **pages,
3059 struct vm_area_struct **vmas, int *locked)
3060{
3faa52c0
JH
3061 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3062 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3063 return -EINVAL;
3064
3065 gup_flags |= FOLL_PIN;
64019a2e 3066 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3faa52c0 3067 pages, vmas, locked);
eddb1c22
JH
3068}
3069EXPORT_SYMBOL(pin_user_pages_remote);
3070
3071/**
3072 * pin_user_pages() - pin user pages in memory for use by other devices
3073 *
3faa52c0
JH
3074 * @start: starting user address
3075 * @nr_pages: number of pages from start to pin
3076 * @gup_flags: flags modifying lookup behaviour
3077 * @pages: array that receives pointers to the pages pinned.
3078 * Should be at least nr_pages long. Or NULL, if caller
3079 * only intends to ensure the pages are faulted in.
3080 * @vmas: array of pointers to vmas corresponding to each page.
3081 * Or NULL if the caller does not require them.
3082 *
3083 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3084 * FOLL_PIN is set.
3085 *
3086 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 3087 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22
JH
3088 */
3089long pin_user_pages(unsigned long start, unsigned long nr_pages,
3090 unsigned int gup_flags, struct page **pages,
3091 struct vm_area_struct **vmas)
3092{
3faa52c0
JH
3093 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3094 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3095 return -EINVAL;
3096
3097 gup_flags |= FOLL_PIN;
64019a2e 3098 return __gup_longterm_locked(current->mm, start, nr_pages,
3faa52c0 3099 pages, vmas, gup_flags);
eddb1c22
JH
3100}
3101EXPORT_SYMBOL(pin_user_pages);
91429023
JH
3102
3103/*
3104 * pin_user_pages_unlocked() is the FOLL_PIN variant of
3105 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3106 * FOLL_PIN and rejects FOLL_GET.
3107 */
3108long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3109 struct page **pages, unsigned int gup_flags)
3110{
3111 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3112 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3113 return -EINVAL;
3114
3115 gup_flags |= FOLL_PIN;
3116 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3117}
3118EXPORT_SYMBOL(pin_user_pages_unlocked);
420c2091
JH
3119
3120/*
3121 * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
3122 * Behavior is the same, except that this one sets FOLL_PIN and rejects
3123 * FOLL_GET.
3124 */
3125long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
3126 unsigned int gup_flags, struct page **pages,
3127 int *locked)
3128{
3129 /*
3130 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
3131 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
3132 * vmas. As there are no users of this flag in this call we simply
3133 * disallow this option for now.
3134 */
3135 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
3136 return -EINVAL;
3137
3138 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3139 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3140 return -EINVAL;
3141
3142 gup_flags |= FOLL_PIN;
64019a2e 3143 return __get_user_pages_locked(current->mm, start, nr_pages,
420c2091
JH
3144 pages, NULL, locked,
3145 gup_flags | FOLL_TOUCH);
3146}
3147EXPORT_SYMBOL(pin_user_pages_locked);