// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

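/*
 * Helpers for the exact pin count kept on compound pages large enough to
 * have one (see hpage_pincount_available()): the count is stored with the
 * compound page's metadata and is reached via compound_pincount_ptr().
 */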
static void hpage_pincount_add(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_sub(refs, compound_pincount_ptr(page));
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;
	return head;
}

/*
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by 1.
 *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
__maybe_unused struct page *try_grab_compound_head(struct page *page,
						   int refs, unsigned int flags)
{
	if (flags & FOLL_GET)
		return try_get_compound_head(page, refs);
	else if (flags & FOLL_PIN) {
		int orig_refs = refs;

		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in
		 * the right zone, so fail and let the caller fall back to the
		 * slow path.
		 */
		if (unlikely((flags & FOLL_LONGTERM) &&
			     !is_pinnable_page(page)))
			return NULL;

		/*
		 * When pinning a compound page of order > 1 (which is what
		 * hpage_pincount_available() checks for), use an exact count to
		 * track it, via hpage_pincount_add/_sub().
		 *
		 * However, be sure to *also* increment the normal page refcount
		 * field at least once, so that the page really is pinned.
		 */
		if (!hpage_pincount_available(page))
			refs *= GUP_PIN_COUNTING_BIAS;

		page = try_get_compound_head(page, refs);
		if (!page)
			return NULL;

		if (hpage_pincount_available(page))
			hpage_pincount_add(page, refs);

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
				    orig_refs);

		return page;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
				    refs);

		if (hpage_pincount_available(page))
			hpage_pincount_sub(page, refs);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	VM_BUG_ON_PAGE(page_ref_count(page) < refs, page);
	/*
	 * Calling put_page() for each ref is unnecessarily slow. Only the last
	 * ref needs a put_page().
	 */
	if (refs > 1)
		page_ref_sub(page, refs - 1);
	put_page(page);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * @page:	pointer to page to be grabbed
 * @flags:	gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
 * time. Cases:
 *
 *    FOLL_GET: page's refcount will be incremented by 1.
 *    FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));

	if (flags & FOLL_GET)
		return try_get_page(page);
	else if (flags & FOLL_PIN) {
		int refs = 1;

		page = compound_head(page);

		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
			return false;

		if (hpage_pincount_available(page))
			hpage_pincount_add(page, 1);
		else
			refs = GUP_PIN_COUNTING_BIAS;

		/*
		 * Similar to try_grab_compound_head(): even if using the
		 * hpage_pincount_add/_sub() routines, be sure to
		 * *also* increment the normal page refcount field at least
		 * once, so that the page really is pinned.
		 */
		page_ref_add(page, refs);

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
	}

	return true;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page:	pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);

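/*
 * Illustrative sketch (not a real caller in this file): code that pins with
 * pin_user_pages() is expected to release every page through the
 * unpin_user_page*() family, roughly like so:
 *
 *	struct page *pages[16];
 *	long nr;
 *
 *	nr = pin_user_pages(start, 16, FOLL_WRITE | FOLL_LONGTERM,
 *			    pages, NULL);
 *	if (nr > 0) {
 *		// ... DMA to/from the pinned pages ...
 *		unpin_user_pages(pages, nr);
 *	}
 */
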
static inline void compound_range_next(unsigned long i, unsigned long npages,
				       struct page **list, struct page **head,
				       unsigned int *ntails)
{
	struct page *next, *page;
	unsigned int nr = 1;

	if (i >= npages)
		return;

	next = *list + i;
	page = compound_head(next);
	if (PageCompound(page) && compound_order(page) >= 1)
		nr = min_t(unsigned int,
			   page + compound_nr(page) - next, npages - i);

	*head = page;
	*ntails = nr;
}

#define for_each_compound_range(__i, __list, __npages, __head, __ntails) \
	for (__i = 0, \
	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)); \
	     __i < __npages; __i += __ntails, \
	     compound_range_next(__i, __npages, __list, &(__head), &(__ntails)))

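/*
 * compound_next()/for_each_compound_head(): starting at index @i, report the
 * compound head of list[i] in *@head and, in *@ntails, how many consecutive
 * entries of @list share that same head page, so callers can batch one
 * operation per compound page.
 */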
static inline void compound_next(unsigned long i, unsigned long npages,
				 struct page **list, struct page **head,
				 unsigned int *ntails)
{
	struct page *page;
	unsigned int nr;

	if (i >= npages)
		return;

	page = compound_head(list[i]);
	for (nr = i + 1; nr < npages; nr++) {
		if (compound_head(list[nr]) != page)
			break;
	}

	*head = page;
	*ntails = nr - i;
}

#define for_each_compound_head(__i, __list, __npages, __head, __ntails) \
	for (__i = 0, \
	     compound_next(__i, __npages, __list, &(__head), &(__ntails)); \
	     __i < __npages; __i += __ntails, \
	     compound_next(__i, __npages, __list, &(__head), &(__ntails)))

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for_each_compound_head(index, pages, npages, head, ntails) {
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(head))
			set_page_dirty_lock(head);
		put_compound_head(head, ntails, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
 * page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	for_each_compound_range(index, &page, npages, head, ntails) {
		if (make_dirty && !PageDirty(head))
			set_page_dirty_lock(head);
		put_compound_head(head, ntails, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;
	struct page *head;
	unsigned int ntails;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;

	for_each_compound_head(index, pages, npages, head, ntails)
		put_compound_head(head, ntails, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_pages);

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	 /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_lock is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT_PMD) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

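/*
 * Illustrative sketch (not a real caller in this file): follow_page() is
 * normally used with the mmap_lock already held, e.g.:
 *
 *	mmap_read_lock(mm);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (page && !IS_ERR(page)) {
 *		// ... examine the page ...
 *		put_page(page);
 *	}
 *	mmap_read_unlock(mm);
 */
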
f2b495ca
KS
825static int get_gate_page(struct mm_struct *mm, unsigned long address,
826 unsigned int gup_flags, struct vm_area_struct **vma,
827 struct page **page)
828{
829 pgd_t *pgd;
c2febafc 830 p4d_t *p4d;
f2b495ca
KS
831 pud_t *pud;
832 pmd_t *pmd;
833 pte_t *pte;
834 int ret = -EFAULT;
835
836 /* user gate pages are read-only */
837 if (gup_flags & FOLL_WRITE)
838 return -EFAULT;
839 if (address > TASK_SIZE)
840 pgd = pgd_offset_k(address);
841 else
842 pgd = pgd_offset_gate(mm, address);
b5d1c39f
AL
843 if (pgd_none(*pgd))
844 return -EFAULT;
c2febafc 845 p4d = p4d_offset(pgd, address);
b5d1c39f
AL
846 if (p4d_none(*p4d))
847 return -EFAULT;
c2febafc 848 pud = pud_offset(p4d, address);
b5d1c39f
AL
849 if (pud_none(*pud))
850 return -EFAULT;
f2b495ca 851 pmd = pmd_offset(pud, address);
84c3fc4e 852 if (!pmd_present(*pmd))
f2b495ca
KS
853 return -EFAULT;
854 VM_BUG_ON(pmd_trans_huge(*pmd));
855 pte = pte_offset_map(pmd, address);
856 if (pte_none(*pte))
857 goto unmap;
858 *vma = get_gate_vma(mm);
859 if (!page)
860 goto out;
861 *page = vm_normal_page(*vma, address, *pte);
862 if (!*page) {
863 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
864 goto unmap;
865 *page = pte_page(*pte);
866 }
9fa2dd94 867 if (unlikely(!try_grab_page(*page, gup_flags))) {
8fde12ca
LT
868 ret = -ENOMEM;
869 goto unmap;
870 }
f2b495ca
KS
871out:
872 ret = 0;
873unmap:
874 pte_unmap(pte);
875 return ret;
876}
877
9a95f3cf 878/*
c1e8d7c6
ML
879 * mmap_lock must be held on entry. If @locked != NULL and *@flags
880 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
4f6da934 881 * is, *@locked will be set to 0 and -EBUSY returned.
9a95f3cf 882 */
64019a2e 883static int faultin_page(struct vm_area_struct *vma,
4f6da934 884 unsigned long address, unsigned int *flags, int *locked)
16744483 885{
16744483 886 unsigned int fault_flags = 0;
2b740303 887 vm_fault_t ret;
16744483 888
de60f5f1
EM
889 /* mlock all present pages, but do not fault in new pages */
890 if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
891 return -ENOENT;
16744483
KS
892 if (*flags & FOLL_WRITE)
893 fault_flags |= FAULT_FLAG_WRITE;
1b2ee126
DH
894 if (*flags & FOLL_REMOTE)
895 fault_flags |= FAULT_FLAG_REMOTE;
4f6da934 896 if (locked)
71335f37 897 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
16744483
KS
898 if (*flags & FOLL_NOWAIT)
899 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
234b239b 900 if (*flags & FOLL_TRIED) {
4426e945
PX
901 /*
902 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
903 * can co-exist
904 */
234b239b
ALC
905 fault_flags |= FAULT_FLAG_TRIED;
906 }
16744483 907
bce617ed 908 ret = handle_mm_fault(vma, address, fault_flags, NULL);
16744483 909 if (ret & VM_FAULT_ERROR) {
9a291a7c
JM
910 int err = vm_fault_to_errno(ret, *flags);
911
912 if (err)
913 return err;
16744483
KS
914 BUG();
915 }
916
16744483 917 if (ret & VM_FAULT_RETRY) {
4f6da934
PX
918 if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
919 *locked = 0;
16744483
KS
920 return -EBUSY;
921 }
922
923 /*
924 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
925 * necessary, even if maybe_mkwrite decided not to set pte_write. We
926 * can thus safely do subsequent page lookups as if they were reads.
927 * But only do so when looping for pte_write is futile: in some cases
928 * userspace may also be wanting to write to the gotten user page,
929 * which a read fault here might prevent (a readonly page might get
930 * reCOWed by userspace write).
931 */
932 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
2923117b 933 *flags |= FOLL_COW;
16744483
KS
934 return 0;
935}
936
fa5bb209
KS
937static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
938{
939 vm_flags_t vm_flags = vma->vm_flags;
1b2ee126
DH
940 int write = (gup_flags & FOLL_WRITE);
941 int foreign = (gup_flags & FOLL_REMOTE);
fa5bb209
KS
942
943 if (vm_flags & (VM_IO | VM_PFNMAP))
944 return -EFAULT;
945
7f7ccc2c
WT
946 if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
947 return -EFAULT;
948
52650c8b
JG
949 if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
950 return -EOPNOTSUPP;
951
1b2ee126 952 if (write) {
fa5bb209
KS
953 if (!(vm_flags & VM_WRITE)) {
954 if (!(gup_flags & FOLL_FORCE))
955 return -EFAULT;
956 /*
957 * We used to let the write,force case do COW in a
958 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
959 * set a breakpoint in a read-only mapping of an
960 * executable, without corrupting the file (yet only
961 * when that file had been opened for writing!).
962 * Anon pages in shared mappings are surprising: now
963 * just reject it.
964 */
46435364 965 if (!is_cow_mapping(vm_flags))
fa5bb209 966 return -EFAULT;
fa5bb209
KS
967 }
968 } else if (!(vm_flags & VM_READ)) {
969 if (!(gup_flags & FOLL_FORCE))
970 return -EFAULT;
971 /*
972 * Is there actually any vma we can reach here which does not
973 * have VM_MAYREAD set?
974 */
975 if (!(vm_flags & VM_MAYREAD))
976 return -EFAULT;
977 }
d61172b4
DH
978 /*
979 * gups are always data accesses, not instruction
980 * fetches, so execute=false here
981 */
982 if (!arch_vma_access_permitted(vma, write, false, foreign))
33a709b2 983 return -EFAULT;
fa5bb209
KS
984 return 0;
985}
986
/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
 * released by an up_read().  That can happen if @gup_flags does not
 * have FOLL_NOWAIT.
 *
 * A caller using such a combination of @locked and @gup_flags
 * must therefore hold the mmap_lock for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
64019a2e 1047static long __get_user_pages(struct mm_struct *mm,
4bbd4c77
KS
1048 unsigned long start, unsigned long nr_pages,
1049 unsigned int gup_flags, struct page **pages,
4f6da934 1050 struct vm_area_struct **vmas, int *locked)
4bbd4c77 1051{
df06b37f 1052 long ret = 0, i = 0;
fa5bb209 1053 struct vm_area_struct *vma = NULL;
df06b37f 1054 struct follow_page_context ctx = { NULL };
4bbd4c77
KS
1055
1056 if (!nr_pages)
1057 return 0;
1058
f9652594
AK
1059 start = untagged_addr(start);
1060
eddb1c22 1061 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
4bbd4c77
KS
1062
1063 /*
1064 * If FOLL_FORCE is set then do not force a full fault as the hinting
1065 * fault information is unrelated to the reference behaviour of a task
1066 * using the address space
1067 */
1068 if (!(gup_flags & FOLL_FORCE))
1069 gup_flags |= FOLL_NUMA;
1070
4bbd4c77 1071 do {
fa5bb209
KS
1072 struct page *page;
1073 unsigned int foll_flags = gup_flags;
1074 unsigned int page_increm;
1075
1076 /* first iteration or cross vma bound */
1077 if (!vma || start >= vma->vm_end) {
1078 vma = find_extend_vma(mm, start);
1079 if (!vma && in_gate_area(mm, start)) {
fa5bb209
KS
1080 ret = get_gate_page(mm, start & PAGE_MASK,
1081 gup_flags, &vma,
1082 pages ? &pages[i] : NULL);
1083 if (ret)
08be37b7 1084 goto out;
df06b37f 1085 ctx.page_mask = 0;
fa5bb209
KS
1086 goto next_page;
1087 }
4bbd4c77 1088
52650c8b 1089 if (!vma) {
df06b37f
KB
1090 ret = -EFAULT;
1091 goto out;
1092 }
52650c8b
JG
1093 ret = check_vma_flags(vma, gup_flags);
1094 if (ret)
1095 goto out;
1096
fa5bb209
KS
1097 if (is_vm_hugetlb_page(vma)) {
1098 i = follow_hugetlb_page(mm, vma, pages, vmas,
1099 &start, &nr_pages, i,
a308c71b 1100 gup_flags, locked);
ad415db8
PX
1101 if (locked && *locked == 0) {
1102 /*
1103 * We've got a VM_FAULT_RETRY
c1e8d7c6 1104 * and we've lost mmap_lock.
ad415db8
PX
1105 * We must stop here.
1106 */
1107 BUG_ON(gup_flags & FOLL_NOWAIT);
1108 BUG_ON(ret != 0);
1109 goto out;
1110 }
fa5bb209 1111 continue;
4bbd4c77 1112 }
fa5bb209
KS
1113 }
1114retry:
1115 /*
1116 * If we have a pending SIGKILL, don't keep faulting pages and
1117 * potentially allocating memory.
1118 */
fa45f116 1119 if (fatal_signal_pending(current)) {
d180870d 1120 ret = -EINTR;
df06b37f
KB
1121 goto out;
1122 }
fa5bb209 1123 cond_resched();
df06b37f
KB
1124
1125 page = follow_page_mask(vma, start, foll_flags, &ctx);
fa5bb209 1126 if (!page) {
64019a2e 1127 ret = faultin_page(vma, start, &foll_flags, locked);
fa5bb209
KS
1128 switch (ret) {
1129 case 0:
1130 goto retry;
df06b37f
KB
1131 case -EBUSY:
1132 ret = 0;
e4a9bc58 1133 fallthrough;
fa5bb209
KS
1134 case -EFAULT:
1135 case -ENOMEM:
1136 case -EHWPOISON:
df06b37f 1137 goto out;
fa5bb209
KS
1138 case -ENOENT:
1139 goto next_page;
4bbd4c77 1140 }
fa5bb209 1141 BUG();
1027e443
KS
1142 } else if (PTR_ERR(page) == -EEXIST) {
1143 /*
1144 * Proper page table entry exists, but no corresponding
1145 * struct page.
1146 */
1147 goto next_page;
1148 } else if (IS_ERR(page)) {
df06b37f
KB
1149 ret = PTR_ERR(page);
1150 goto out;
1027e443 1151 }
fa5bb209
KS
1152 if (pages) {
1153 pages[i] = page;
1154 flush_anon_page(vma, page, start);
1155 flush_dcache_page(page);
df06b37f 1156 ctx.page_mask = 0;
4bbd4c77 1157 }
4bbd4c77 1158next_page:
fa5bb209
KS
1159 if (vmas) {
1160 vmas[i] = vma;
df06b37f 1161 ctx.page_mask = 0;
fa5bb209 1162 }
df06b37f 1163 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
fa5bb209
KS
1164 if (page_increm > nr_pages)
1165 page_increm = nr_pages;
1166 i += page_increm;
1167 start += page_increm * PAGE_SIZE;
1168 nr_pages -= page_increm;
4bbd4c77 1169 } while (nr_pages);
df06b37f
KB
1170out:
1171 if (ctx.pgmap)
1172 put_dev_pagemap(ctx.pgmap);
1173 return i ? i : ret;
4bbd4c77 1174}
4bbd4c77 1175
771ab430
TK
1176static bool vma_permits_fault(struct vm_area_struct *vma,
1177 unsigned int fault_flags)
d4925e00 1178{
1b2ee126
DH
1179 bool write = !!(fault_flags & FAULT_FLAG_WRITE);
1180 bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
33a709b2 1181 vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
d4925e00
DH
1182
1183 if (!(vm_flags & vma->vm_flags))
1184 return false;
1185
33a709b2
DH
1186 /*
1187 * The architecture might have a hardware protection
1b2ee126 1188 * mechanism other than read/write that can deny access.
d61172b4
DH
1189 *
1190 * gup always represents data access, not instruction
1191 * fetches, so execute=false here:
33a709b2 1192 */
d61172b4 1193 if (!arch_vma_access_permitted(vma, write, false, foreign))
33a709b2
DH
1194 return false;
1195
d4925e00
DH
1196 return true;
1197}
1198
/**
 * fixup_user_fault() - manually resolve a user page fault
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_lock while retrying, maybe NULL if caller
 *		does not allow retry. If NULL, the caller must guarantee
 *		that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software. On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_lock. So it does not
 * have the same semantics wrt the @mm->mmap_lock as does filemap_fault().
 */
int fixup_user_fault(struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	address = untagged_addr(address);

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	if ((fault_flags & FAULT_FLAG_KILLABLE) &&
	    fatal_signal_pending(current))
		return -EINTR;

	ret = handle_mm_fault(vma, address, fault_flags, NULL);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		mmap_read_lock(mm);
		*unlocked = true;
		fault_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

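/*
 * Illustrative sketch (not a real caller in this file): the typical pattern,
 * as in futex-like code, is:
 *
 *	bool unlocked = false;
 *	int ret;
 *
 *	mmap_read_lock(mm);
 *	ret = fixup_user_fault(mm, address, FAULT_FLAG_WRITE, &unlocked);
 *	mmap_read_unlock(mm);
 */
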
2d3a36a4
MH
1273/*
1274 * Please note that this function, unlike __get_user_pages will not
1275 * return 0 for nr_pages > 0 without FOLL_NOWAIT
1276 */
64019a2e 1277static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
f0818f47
AA
1278 unsigned long start,
1279 unsigned long nr_pages,
f0818f47
AA
1280 struct page **pages,
1281 struct vm_area_struct **vmas,
e716712f 1282 int *locked,
0fd71a56 1283 unsigned int flags)
f0818f47 1284{
f0818f47
AA
1285 long ret, pages_done;
1286 bool lock_dropped;
1287
1288 if (locked) {
1289 /* if VM_FAULT_RETRY can be returned, vmas become invalid */
1290 BUG_ON(vmas);
1291 /* check caller initialized locked */
1292 BUG_ON(*locked != 1);
1293 }
1294
008cfe44 1295 if (flags & FOLL_PIN)
a4d63c37 1296 atomic_set(&mm->has_pinned, 1);
008cfe44 1297
eddb1c22
JH
1298 /*
1299 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1300 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1301 * carelessly failed to specify FOLL_GET), so keep doing that, but only
1302 * for FOLL_GET, not for the newer FOLL_PIN.
1303 *
1304 * FOLL_PIN always expects pages to be non-null, but no need to assert
1305 * that here, as any failures will be obvious enough.
1306 */
1307 if (pages && !(flags & FOLL_PIN))
f0818f47 1308 flags |= FOLL_GET;
f0818f47
AA
1309
1310 pages_done = 0;
1311 lock_dropped = false;
1312 for (;;) {
64019a2e 1313 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
f0818f47
AA
1314 vmas, locked);
1315 if (!locked)
1316 /* VM_FAULT_RETRY couldn't trigger, bypass */
1317 return ret;
1318
1319 /* VM_FAULT_RETRY cannot return errors */
1320 if (!*locked) {
1321 BUG_ON(ret < 0);
1322 BUG_ON(ret >= nr_pages);
1323 }
1324
f0818f47
AA
1325 if (ret > 0) {
1326 nr_pages -= ret;
1327 pages_done += ret;
1328 if (!nr_pages)
1329 break;
1330 }
1331 if (*locked) {
96312e61
AA
1332 /*
1333 * VM_FAULT_RETRY didn't trigger or it was a
1334 * FOLL_NOWAIT.
1335 */
f0818f47
AA
1336 if (!pages_done)
1337 pages_done = ret;
1338 break;
1339 }
df17277b
MR
1340 /*
1341 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1342 * For the prefault case (!pages) we only update counts.
1343 */
1344 if (likely(pages))
1345 pages += ret;
f0818f47 1346 start += ret << PAGE_SHIFT;
4426e945 1347 lock_dropped = true;
f0818f47 1348
4426e945 1349retry:
f0818f47
AA
1350 /*
1351 * Repeat on the address that fired VM_FAULT_RETRY
4426e945
PX
1352 * with both FAULT_FLAG_ALLOW_RETRY and
1353 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1354 * by fatal signals, so we need to check it before we
1355 * start trying again otherwise it can loop forever.
f0818f47 1356 */
4426e945 1357
ae46d2aa
HD
1358 if (fatal_signal_pending(current)) {
1359 if (!pages_done)
1360 pages_done = -EINTR;
4426e945 1361 break;
ae46d2aa 1362 }
4426e945 1363
d8ed45c5 1364 ret = mmap_read_lock_killable(mm);
71335f37
PX
1365 if (ret) {
1366 BUG_ON(ret > 0);
1367 if (!pages_done)
1368 pages_done = ret;
1369 break;
1370 }
4426e945 1371
c7b6a566 1372 *locked = 1;
64019a2e 1373 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
4426e945
PX
1374 pages, NULL, locked);
1375 if (!*locked) {
1376 /* Continue to retry until we succeeded */
1377 BUG_ON(ret != 0);
1378 goto retry;
1379 }
f0818f47
AA
1380 if (ret != 1) {
1381 BUG_ON(ret > 1);
1382 if (!pages_done)
1383 pages_done = ret;
1384 break;
1385 }
1386 nr_pages--;
1387 pages_done++;
1388 if (!nr_pages)
1389 break;
df17277b
MR
1390 if (likely(pages))
1391 pages++;
f0818f47
AA
1392 start += PAGE_SIZE;
1393 }
e716712f 1394 if (lock_dropped && *locked) {
f0818f47
AA
1395 /*
1396 * We must let the caller know we temporarily dropped the lock
1397 * and so the critical section protected by it was lost.
1398 */
d8ed45c5 1399 mmap_read_unlock(mm);
f0818f47
AA
1400 *locked = 0;
1401 }
1402 return pages_done;
1403}
1404
d3649f68
CH
1405/**
1406 * populate_vma_page_range() - populate a range of pages in the vma.
1407 * @vma: target vma
1408 * @start: start address
1409 * @end: end address
c1e8d7c6 1410 * @locked: whether the mmap_lock is still held
d3649f68
CH
1411 *
1412 * This takes care of mlocking the pages too if VM_LOCKED is set.
1413 *
0a36f7f8
TY
1414 * Return either number of pages pinned in the vma, or a negative error
1415 * code on error.
d3649f68 1416 *
c1e8d7c6 1417 * vma->vm_mm->mmap_lock must be held.
d3649f68 1418 *
4f6da934 1419 * If @locked is NULL, it may be held for read or write and will
d3649f68
CH
1420 * be unperturbed.
1421 *
4f6da934
PX
1422 * If @locked is non-NULL, it must held for read only and may be
1423 * released. If it's released, *@locked will be set to 0.
d3649f68
CH
1424 */
1425long populate_vma_page_range(struct vm_area_struct *vma,
4f6da934 1426 unsigned long start, unsigned long end, int *locked)
d3649f68
CH
1427{
1428 struct mm_struct *mm = vma->vm_mm;
1429 unsigned long nr_pages = (end - start) / PAGE_SIZE;
1430 int gup_flags;
1431
1432 VM_BUG_ON(start & ~PAGE_MASK);
1433 VM_BUG_ON(end & ~PAGE_MASK);
1434 VM_BUG_ON_VMA(start < vma->vm_start, vma);
1435 VM_BUG_ON_VMA(end > vma->vm_end, vma);
42fc5414 1436 mmap_assert_locked(mm);
d3649f68
CH
1437
1438 gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1439 if (vma->vm_flags & VM_LOCKONFAULT)
1440 gup_flags &= ~FOLL_POPULATE;
1441 /*
1442 * We want to touch writable mappings with a write fault in order
1443 * to break COW, except for shared mappings because these don't COW
1444 * and we would not want to dirty them for nothing.
1445 */
1446 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1447 gup_flags |= FOLL_WRITE;
1448
1449 /*
1450 * We want mlock to succeed for regions that have any permissions
1451 * other than PROT_NONE.
1452 */
3122e80e 1453 if (vma_is_accessible(vma))
d3649f68
CH
1454 gup_flags |= FOLL_FORCE;
1455
1456 /*
1457 * We made sure addr is within a VMA, so the following will
1458 * not result in a stack expansion that recurses back here.
1459 */
64019a2e 1460 return __get_user_pages(mm, start, nr_pages, gup_flags,
4f6da934 1461 NULL, NULL, locked);
d3649f68
CH
1462}
1463
1464/*
1465 * __mm_populate - populate and/or mlock pages within a range of address space.
1466 *
1467 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1468 * flags. VMAs must be already marked with the desired vm_flags, and
c1e8d7c6 1469 * mmap_lock must not be held.
d3649f68
CH
1470 */
1471int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1472{
1473 struct mm_struct *mm = current->mm;
1474 unsigned long end, nstart, nend;
1475 struct vm_area_struct *vma = NULL;
1476 int locked = 0;
1477 long ret = 0;
1478
1479 end = start + len;
1480
1481 for (nstart = start; nstart < end; nstart = nend) {
1482 /*
1483 * We want to fault in pages for [nstart; end) address range.
1484 * Find first corresponding VMA.
1485 */
1486 if (!locked) {
1487 locked = 1;
d8ed45c5 1488 mmap_read_lock(mm);
d3649f68
CH
1489 vma = find_vma(mm, nstart);
1490 } else if (nstart >= vma->vm_end)
1491 vma = vma->vm_next;
1492 if (!vma || vma->vm_start >= end)
1493 break;
1494 /*
1495 * Set [nstart; nend) to intersection of desired address
1496 * range with the first VMA. Also, skip undesirable VMA types.
1497 */
1498 nend = min(end, vma->vm_end);
1499 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1500 continue;
1501 if (nstart < vma->vm_start)
1502 nstart = vma->vm_start;
1503 /*
1504 * Now fault in a range of pages. populate_vma_page_range()
1505 * double checks the vma flags, so that it won't mlock pages
1506 * if the vma was already munlocked.
1507 */
1508 ret = populate_vma_page_range(vma, nstart, nend, &locked);
1509 if (ret < 0) {
1510 if (ignore_errors) {
1511 ret = 0;
1512 continue; /* continue at next VMA */
1513 }
1514 break;
1515 }
1516 nend = nstart + ret * PAGE_SIZE;
1517 ret = 0;
1518 }
1519 if (locked)
d8ed45c5 1520 mmap_read_unlock(mm);
d3649f68
CH
1521 return ret; /* 0 or negative error code */
1522}
050a9adc 1523#else /* CONFIG_MMU */
64019a2e 1524static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
050a9adc
CH
1525 unsigned long nr_pages, struct page **pages,
1526 struct vm_area_struct **vmas, int *locked,
1527 unsigned int foll_flags)
1528{
1529 struct vm_area_struct *vma;
1530 unsigned long vm_flags;
24dc20c7 1531 long i;
050a9adc
CH
1532
1533 /* calculate required read or write permissions.
1534 * If FOLL_FORCE is set, we only require the "MAY" flags.
1535 */
1536 vm_flags = (foll_flags & FOLL_WRITE) ?
1537 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1538 vm_flags &= (foll_flags & FOLL_FORCE) ?
1539 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1540
1541 for (i = 0; i < nr_pages; i++) {
1542 vma = find_vma(mm, start);
1543 if (!vma)
1544 goto finish_or_fault;
1545
1546 /* protect what we can, including chardevs */
1547 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1548 !(vm_flags & vma->vm_flags))
1549 goto finish_or_fault;
1550
1551 if (pages) {
1552 pages[i] = virt_to_page(start);
1553 if (pages[i])
1554 get_page(pages[i]);
1555 }
1556 if (vmas)
1557 vmas[i] = vma;
1558 start = (start + PAGE_SIZE) & PAGE_MASK;
1559 }
1560
1561 return i;
1562
1563finish_or_fault:
1564 return i ? : -EFAULT;
1565}
1566#endif /* !CONFIG_MMU */
d3649f68 1567
8f942eea
JH
1568/**
1569 * get_dump_page() - pin user page in memory while writing it to core dump
1570 * @addr: user address
1571 *
1572 * Returns struct page pointer of user page pinned for dump,
1573 * to be freed afterwards by put_page().
1574 *
1575 * Returns NULL on any kind of failure - a hole must then be inserted into
1576 * the corefile, to preserve alignment with its headers; and also returns
1577 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
f0953a1b 1578 * allowing a hole to be left in the corefile to save disk space.
8f942eea 1579 *
7f3bfab5 1580 * Called without mmap_lock (takes and releases the mmap_lock by itself).
8f942eea
JH
1581 */
1582#ifdef CONFIG_ELF_CORE
1583struct page *get_dump_page(unsigned long addr)
1584{
7f3bfab5 1585 struct mm_struct *mm = current->mm;
8f942eea 1586 struct page *page;
7f3bfab5
JH
1587 int locked = 1;
1588 int ret;
8f942eea 1589
7f3bfab5 1590 if (mmap_read_lock_killable(mm))
8f942eea 1591 return NULL;
7f3bfab5
JH
1592 ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1593 FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1594 if (locked)
1595 mmap_read_unlock(mm);
d3378e86
AY
1596
1597 if (ret == 1 && is_page_poisoned(page))
1598 return NULL;
1599
7f3bfab5 1600 return (ret == 1) ? page : NULL;
8f942eea
JH
1601}
1602#endif /* CONFIG_ELF_CORE */
1603
d1e153fe 1604#ifdef CONFIG_MIGRATION
f68749ec
PT
1605/*
1606 * Check whether all pages are pinnable, if so return number of pages. If some
1607 * pages are not pinnable, migrate them, and unpin all pages. Return zero if
1608 * pages were migrated, or if some pages were not successfully isolated.
1609 * Return negative error if migration fails.
1610 */
1611static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1612 struct page **pages,
d1e153fe 1613 unsigned int gup_flags)
9a4e9f3b 1614{
f68749ec
PT
1615 unsigned long i;
1616 unsigned long isolation_error_count = 0;
1617 bool drain_allow = true;
d1e153fe 1618 LIST_HEAD(movable_page_list);
f68749ec
PT
1619 long ret = 0;
1620 struct page *prev_head = NULL;
1621 struct page *head;
ed03d924
JK
1622 struct migration_target_control mtc = {
1623 .nid = NUMA_NO_NODE,
c991ffef 1624 .gfp_mask = GFP_USER | __GFP_NOWARN,
ed03d924 1625 };
9a4e9f3b 1626
83c02c23
PT
1627 for (i = 0; i < nr_pages; i++) {
1628 head = compound_head(pages[i]);
1629 if (head == prev_head)
1630 continue;
1631 prev_head = head;
9a4e9f3b 1632 /*
d1e153fe
PT
1633 * If we get a movable page, since we are going to be pinning
1634 * these entries, try to move them out if possible.
9a4e9f3b 1635 */
d1e153fe 1636 if (!is_pinnable_page(head)) {
6e7f34eb 1637 if (PageHuge(head)) {
d1e153fe 1638 if (!isolate_huge_page(head, &movable_page_list))
6e7f34eb
PT
1639 isolation_error_count++;
1640 } else {
9a4e9f3b
AK
1641 if (!PageLRU(head) && drain_allow) {
1642 lru_add_drain_all();
1643 drain_allow = false;
1644 }
1645
6e7f34eb
PT
1646 if (isolate_lru_page(head)) {
1647 isolation_error_count++;
1648 continue;
9a4e9f3b 1649 }
d1e153fe 1650 list_add_tail(&head->lru, &movable_page_list);
6e7f34eb
PT
1651 mod_node_page_state(page_pgdat(head),
1652 NR_ISOLATED_ANON +
1653 page_is_file_lru(head),
1654 thp_nr_pages(head));
9a4e9f3b
AK
1655 }
1656 }
1657 }
1658
6e7f34eb
PT
1659 /*
1660 * If list is empty, and no isolation errors, means that all pages are
1661 * in the correct zone.
1662 */
d1e153fe 1663 if (list_empty(&movable_page_list) && !isolation_error_count)
f68749ec 1664 return nr_pages;
6e7f34eb 1665
f68749ec
PT
1666 if (gup_flags & FOLL_PIN) {
1667 unpin_user_pages(pages, nr_pages);
1668 } else {
1669 for (i = 0; i < nr_pages; i++)
1670 put_page(pages[i]);
1671 }
d1e153fe 1672 if (!list_empty(&movable_page_list)) {
d1e153fe 1673 ret = migrate_pages(&movable_page_list, alloc_migration_target,
f0f44638 1674 NULL, (unsigned long)&mtc, MIGRATE_SYNC,
d1e153fe 1675 MR_LONGTERM_PIN);
f68749ec
PT
1676 if (ret && !list_empty(&movable_page_list))
1677 putback_movable_pages(&movable_page_list);
9a4e9f3b
AK
1678 }
1679
f68749ec 1680 return ret > 0 ? -ENOMEM : ret;
9a4e9f3b
AK
1681}
1682#else
f68749ec 1683static long check_and_migrate_movable_pages(unsigned long nr_pages,
d1e153fe 1684 struct page **pages,
d1e153fe 1685 unsigned int gup_flags)
9a4e9f3b
AK
1686{
1687 return nr_pages;
1688}
d1e153fe 1689#endif /* CONFIG_MIGRATION */
9a4e9f3b 1690
2bb6d283 1691/*
932f4a63
IW
1692 * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1693 * allows us to process the FOLL_LONGTERM flag.
2bb6d283 1694 */
64019a2e 1695static long __gup_longterm_locked(struct mm_struct *mm,
932f4a63
IW
1696 unsigned long start,
1697 unsigned long nr_pages,
1698 struct page **pages,
1699 struct vm_area_struct **vmas,
1700 unsigned int gup_flags)
2bb6d283 1701{
f68749ec 1702 unsigned int flags;
52650c8b 1703 long rc;
2bb6d283 1704
f68749ec
PT
1705 if (!(gup_flags & FOLL_LONGTERM))
1706 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1707 NULL, gup_flags);
1708 flags = memalloc_pin_save();
1709 do {
1710 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1711 NULL, gup_flags);
1712 if (rc <= 0)
1713 break;
1714 rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
1715 } while (!rc);
1716 memalloc_pin_restore(flags);
2bb6d283 1717
2bb6d283
DW
1718 return rc;
1719}
932f4a63 1720
447f3e45
BS
1721static bool is_valid_gup_flags(unsigned int gup_flags)
1722{
1723 /*
1724 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1725 * never directly by the caller, so enforce that with an assertion:
1726 */
1727 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1728 return false;
1729 /*
1730 * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
 1731 * that is, FOLL_LONGTERM is a specific, more restrictive case of
1732 * FOLL_PIN.
1733 */
1734 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1735 return false;
1736
1737 return true;
1738}
1739
22bf29b6 1740#ifdef CONFIG_MMU
64019a2e 1741static long __get_user_pages_remote(struct mm_struct *mm,
22bf29b6
JH
1742 unsigned long start, unsigned long nr_pages,
1743 unsigned int gup_flags, struct page **pages,
1744 struct vm_area_struct **vmas, int *locked)
1745{
1746 /*
1747 * Parts of FOLL_LONGTERM behavior are incompatible with
1748 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1749 * vmas. However, this only comes up if locked is set, and there are
1750 * callers that do request FOLL_LONGTERM, but do not set locked. So,
1751 * allow what we can.
1752 */
1753 if (gup_flags & FOLL_LONGTERM) {
1754 if (WARN_ON_ONCE(locked))
1755 return -EINVAL;
1756 /*
1757 * This will check the vmas (even if our vmas arg is NULL)
1758 * and return -ENOTSUPP if DAX isn't allowed in this case:
1759 */
64019a2e 1760 return __gup_longterm_locked(mm, start, nr_pages, pages,
22bf29b6
JH
1761 vmas, gup_flags | FOLL_TOUCH |
1762 FOLL_REMOTE);
1763 }
1764
64019a2e 1765 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
22bf29b6
JH
1766 locked,
1767 gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1768}
1769
adc8cb40 1770/**
c4237f8b 1771 * get_user_pages_remote() - pin user pages in memory
c4237f8b
JH
1772 * @mm: mm_struct of target mm
1773 * @start: starting user address
1774 * @nr_pages: number of pages from start to pin
1775 * @gup_flags: flags modifying lookup behaviour
1776 * @pages: array that receives pointers to the pages pinned.
1777 * Should be at least nr_pages long. Or NULL, if caller
1778 * only intends to ensure the pages are faulted in.
1779 * @vmas: array of pointers to vmas corresponding to each page.
1780 * Or NULL if the caller does not require them.
1781 * @locked: pointer to lock flag indicating whether lock is held and
1782 * subsequently whether VM_FAULT_RETRY functionality can be
1783 * utilised. Lock must initially be held.
1784 *
1785 * Returns either number of pages pinned (which may be less than the
1786 * number requested), or an error. Details about the return value:
1787 *
1788 * -- If nr_pages is 0, returns 0.
1789 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1790 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1791 * pages pinned. Again, this may be less than nr_pages.
1792 *
1793 * The caller is responsible for releasing returned @pages, via put_page().
1794 *
c1e8d7c6 1795 * @vmas are valid only as long as mmap_lock is held.
c4237f8b 1796 *
c1e8d7c6 1797 * Must be called with mmap_lock held for read or write.
c4237f8b 1798 *
adc8cb40
SJ
1799 * get_user_pages_remote walks a process's page tables and takes a reference
1800 * to each struct page that each user address corresponds to at a given
c4237f8b
JH
1801 * instant. That is, it takes the page that would be accessed if a user
1802 * thread accesses the given user virtual address at that instant.
1803 *
1804 * This does not guarantee that the page exists in the user mappings when
adc8cb40 1805 * get_user_pages_remote returns, and there may even be a completely different
c4237f8b
JH
 1806 * page there in some cases (e.g. if the mmapped pagecache has been invalidated
 1807 * and subsequently re-faulted). However, it does guarantee that the page
1808 * won't be freed completely. And mostly callers simply care that the page
1809 * contains data that was valid *at some point in time*. Typically, an IO
1810 * or similar operation cannot guarantee anything stronger anyway because
1811 * locks can't be held over the syscall boundary.
1812 *
1813 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1814 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1815 * be called after the page is finished with, and before put_page is called.
1816 *
adc8cb40
SJ
1817 * get_user_pages_remote is typically used for fewer-copy IO operations,
1818 * to get a handle on the memory by some means other than accesses
1819 * via the user virtual addresses. The pages may be submitted for
1820 * DMA to devices or accessed via their kernel linear mapping (via the
1821 * kmap APIs). Care should be taken to use the correct cache flushing APIs.
c4237f8b
JH
1822 *
1823 * See also get_user_pages_fast, for performance critical applications.
1824 *
adc8cb40 1825 * get_user_pages_remote should be phased out in favor of
c4237f8b 1826 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
adc8cb40 1827 * should use get_user_pages_remote because it cannot pass
c4237f8b
JH
1828 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1829 */
64019a2e 1830long get_user_pages_remote(struct mm_struct *mm,
c4237f8b
JH
1831 unsigned long start, unsigned long nr_pages,
1832 unsigned int gup_flags, struct page **pages,
1833 struct vm_area_struct **vmas, int *locked)
1834{
447f3e45 1835 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
1836 return -EINVAL;
1837
64019a2e 1838 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
22bf29b6 1839 pages, vmas, locked);
c4237f8b
JH
1840}
1841EXPORT_SYMBOL(get_user_pages_remote);
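/*
 * Illustrative sketch only (hypothetical helper, not used by the kernel):
 * pinning a single page of a foreign mm, ptrace-style. It demonstrates the
 * locking contract documented above: mmap_lock is held for read around the
 * call, and may already have been dropped when *locked comes back as 0.
 */
static long __maybe_unused example_pin_one_remote_page(struct mm_struct *mm,
						       unsigned long addr,
						       struct page **page)
{
	int locked = 1;
	long ret;

	if (mmap_read_lock_killable(mm))
		return -EINTR;

	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, page, NULL,
				    &locked);
	if (locked)
		mmap_read_unlock(mm);

	/* On success (ret == 1) the caller must eventually put_page(*page). */
	return ret;
}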
1842
eddb1c22 1843#else /* CONFIG_MMU */
64019a2e 1844long get_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
1845 unsigned long start, unsigned long nr_pages,
1846 unsigned int gup_flags, struct page **pages,
1847 struct vm_area_struct **vmas, int *locked)
1848{
1849 return 0;
1850}
3faa52c0 1851
64019a2e 1852static long __get_user_pages_remote(struct mm_struct *mm,
3faa52c0
JH
1853 unsigned long start, unsigned long nr_pages,
1854 unsigned int gup_flags, struct page **pages,
1855 struct vm_area_struct **vmas, int *locked)
1856{
1857 return 0;
1858}
eddb1c22
JH
1859#endif /* !CONFIG_MMU */
1860
adc8cb40
SJ
1861/**
1862 * get_user_pages() - pin user pages in memory
1863 * @start: starting user address
1864 * @nr_pages: number of pages from start to pin
1865 * @gup_flags: flags modifying lookup behaviour
1866 * @pages: array that receives pointers to the pages pinned.
1867 * Should be at least nr_pages long. Or NULL, if caller
1868 * only intends to ensure the pages are faulted in.
1869 * @vmas: array of pointers to vmas corresponding to each page.
1870 * Or NULL if the caller does not require them.
1871 *
64019a2e
PX
1872 * This is the same as get_user_pages_remote(), just with a less-flexible
1873 * calling convention where we assume that the mm being operated on belongs to
1874 * the current task, and doesn't allow passing of a locked parameter. We also
1875 * obviously don't pass FOLL_REMOTE in here.
932f4a63
IW
1876 */
1877long get_user_pages(unsigned long start, unsigned long nr_pages,
1878 unsigned int gup_flags, struct page **pages,
1879 struct vm_area_struct **vmas)
1880{
447f3e45 1881 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
1882 return -EINVAL;
1883
64019a2e 1884 return __gup_longterm_locked(current->mm, start, nr_pages,
932f4a63
IW
1885 pages, vmas, gup_flags | FOLL_TOUCH);
1886}
1887EXPORT_SYMBOL(get_user_pages);
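/*
 * Illustrative sketch only (hypothetical helper): the classic calling
 * pattern for get_user_pages() on the current mm - take mmap_lock for
 * read, pin, drop the lock, use the pages, then put_page() each one.
 */
static long __maybe_unused example_get_current_pages(unsigned long start,
						     unsigned long nr_pages,
						     struct page **pages)
{
	long pinned, i;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	if (pinned <= 0)
		return pinned;

	/* ... access pages[0..pinned - 1] here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned;
}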
2bb6d283 1888
adc8cb40 1889/**
a00cda3f
MCC
1890 * get_user_pages_locked() - variant of get_user_pages()
1891 *
1892 * @start: starting user address
1893 * @nr_pages: number of pages from start to pin
1894 * @gup_flags: flags modifying lookup behaviour
1895 * @pages: array that receives pointers to the pages pinned.
1896 * Should be at least nr_pages long. Or NULL, if caller
1897 * only intends to ensure the pages are faulted in.
1898 * @locked: pointer to lock flag indicating whether lock is held and
1899 * subsequently whether VM_FAULT_RETRY functionality can be
1900 * utilised. Lock must initially be held.
1901 *
1902 * It is suitable to replace the form:
acc3c8d1 1903 *
3e4e28c5 1904 * mmap_read_lock(mm);
d3649f68 1905 * do_something()
64019a2e 1906 * get_user_pages(mm, ..., pages, NULL);
3e4e28c5 1907 * mmap_read_unlock(mm);
acc3c8d1 1908 *
d3649f68 1909 * to:
acc3c8d1 1910 *
d3649f68 1911 * int locked = 1;
3e4e28c5 1912 * mmap_read_lock(mm);
d3649f68 1913 * do_something()
64019a2e 1914 * get_user_pages_locked(mm, ..., pages, &locked);
d3649f68 1915 * if (locked)
3e4e28c5 1916 * mmap_read_unlock(mm);
adc8cb40 1917 *
adc8cb40
SJ
1918 * We can leverage the VM_FAULT_RETRY functionality in the page fault
1919 * paths better by using either get_user_pages_locked() or
1920 * get_user_pages_unlocked().
1921 *
acc3c8d1 1922 */
d3649f68
CH
1923long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1924 unsigned int gup_flags, struct page **pages,
1925 int *locked)
acc3c8d1 1926{
acc3c8d1 1927 /*
d3649f68
CH
1928 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
1929 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1930 * vmas. As there are no users of this flag in this call we simply
1931 * disallow this option for now.
acc3c8d1 1932 */
d3649f68
CH
1933 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1934 return -EINVAL;
420c2091
JH
1935 /*
1936 * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1937 * never directly by the caller, so enforce that:
1938 */
1939 if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1940 return -EINVAL;
acc3c8d1 1941
64019a2e 1942 return __get_user_pages_locked(current->mm, start, nr_pages,
d3649f68
CH
1943 pages, NULL, locked,
1944 gup_flags | FOLL_TOUCH);
acc3c8d1 1945}
d3649f68 1946EXPORT_SYMBOL(get_user_pages_locked);
acc3c8d1
KS
1947
1948/*
d3649f68 1949 * get_user_pages_unlocked() is suitable to replace the form:
acc3c8d1 1950 *
3e4e28c5 1951 * mmap_read_lock(mm);
64019a2e 1952 * get_user_pages(mm, ..., pages, NULL);
3e4e28c5 1953 * mmap_read_unlock(mm);
d3649f68
CH
1954 *
1955 * with:
1956 *
64019a2e 1957 * get_user_pages_unlocked(mm, ..., pages);
d3649f68
CH
1958 *
 1959 * It is functionally equivalent to get_user_pages_fast, so
1960 * get_user_pages_fast should be used instead if specific gup_flags
1961 * (e.g. FOLL_FORCE) are not required.
acc3c8d1 1962 */
d3649f68
CH
1963long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1964 struct page **pages, unsigned int gup_flags)
acc3c8d1
KS
1965{
1966 struct mm_struct *mm = current->mm;
d3649f68
CH
1967 int locked = 1;
1968 long ret;
acc3c8d1 1969
d3649f68
CH
1970 /*
1971 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
1972 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1973 * vmas. As there are no users of this flag in this call we simply
1974 * disallow this option for now.
1975 */
1976 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1977 return -EINVAL;
acc3c8d1 1978
d8ed45c5 1979 mmap_read_lock(mm);
64019a2e 1980 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
d3649f68 1981 &locked, gup_flags | FOLL_TOUCH);
acc3c8d1 1982 if (locked)
d8ed45c5 1983 mmap_read_unlock(mm);
d3649f68 1984 return ret;
4bbd4c77 1985}
d3649f68 1986EXPORT_SYMBOL(get_user_pages_unlocked);
2667f50e
SC
1987
1988/*
67a929e0 1989 * Fast GUP
2667f50e
SC
1990 *
1991 * get_user_pages_fast attempts to pin user pages by walking the page
1992 * tables directly and avoids taking locks. Thus the walker needs to be
1993 * protected from page table pages being freed from under it, and should
1994 * block any THP splits.
1995 *
1996 * One way to achieve this is to have the walker disable interrupts, and
1997 * rely on IPIs from the TLB flushing code blocking before the page table
1998 * pages are freed. This is unsuitable for architectures that do not need
1999 * to broadcast an IPI when invalidating TLBs.
2000 *
 2001 * Another way to achieve this is to batch up the pages containing page tables
 2002 * belonging to more than one mm_user, then rcu_sched a callback to free those
2003 * pages. Disabling interrupts will allow the fast_gup walker to both block
2004 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2005 * (which is a relatively rare event). The code below adopts this strategy.
2006 *
2007 * Before activating this code, please be aware that the following assumptions
2008 * are currently made:
2009 *
ff2e6d72 2010 * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
e585513b 2011 * free pages containing page tables, or TLB flushing requires IPI broadcast.
2667f50e 2012 *
2667f50e
SC
2013 * *) ptes can be read atomically by the architecture.
2014 *
2015 * *) access_ok is sufficient to validate userspace address ranges.
2016 *
2017 * The last two assumptions can be relaxed by the addition of helper functions.
2018 *
2019 * This code is based heavily on the PowerPC implementation by Nick Piggin.
2020 */
67a929e0 2021#ifdef CONFIG_HAVE_FAST_GUP
3faa52c0 2022
790c7369 2023static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
3b78d834 2024 unsigned int flags,
790c7369 2025 struct page **pages)
b59f65fa
KS
2026{
2027 while ((*nr) - nr_start) {
2028 struct page *page = pages[--(*nr)];
2029
2030 ClearPageReferenced(page);
3faa52c0
JH
2031 if (flags & FOLL_PIN)
2032 unpin_user_page(page);
2033 else
2034 put_page(page);
b59f65fa
KS
2035 }
2036}
2037
3010a5ea 2038#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2667f50e 2039static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2040 unsigned int flags, struct page **pages, int *nr)
2667f50e 2041{
b59f65fa
KS
2042 struct dev_pagemap *pgmap = NULL;
2043 int nr_start = *nr, ret = 0;
2667f50e 2044 pte_t *ptep, *ptem;
2667f50e
SC
2045
2046 ptem = ptep = pte_offset_map(&pmd, addr);
2047 do {
2a4a06da 2048 pte_t pte = ptep_get_lockless(ptep);
7aef4172 2049 struct page *head, *page;
2667f50e
SC
2050
2051 /*
 2052 * Similar to the PMD case below, NUMA hinting must take the slow
8a0516ed 2053 * path using the pte_protnone check.
2667f50e 2054 */
e7884f8e
KS
2055 if (pte_protnone(pte))
2056 goto pte_unmap;
2057
b798bec4 2058 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
e7884f8e
KS
2059 goto pte_unmap;
2060
b59f65fa 2061 if (pte_devmap(pte)) {
7af75561
IW
2062 if (unlikely(flags & FOLL_LONGTERM))
2063 goto pte_unmap;
2064
b59f65fa
KS
2065 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2066 if (unlikely(!pgmap)) {
3b78d834 2067 undo_dev_pagemap(nr, nr_start, flags, pages);
b59f65fa
KS
2068 goto pte_unmap;
2069 }
2070 } else if (pte_special(pte))
2667f50e
SC
2071 goto pte_unmap;
2072
2073 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2074 page = pte_page(pte);
2075
3faa52c0 2076 head = try_grab_compound_head(page, 1, flags);
8fde12ca 2077 if (!head)
2667f50e
SC
2078 goto pte_unmap;
2079
2080 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
3faa52c0 2081 put_compound_head(head, 1, flags);
2667f50e
SC
2082 goto pte_unmap;
2083 }
2084
7aef4172 2085 VM_BUG_ON_PAGE(compound_head(page) != head, page);
e9348053 2086
f28d4363
CI
2087 /*
2088 * We need to make the page accessible if and only if we are
2089 * going to access its content (the FOLL_PIN case). Please
2090 * see Documentation/core-api/pin_user_pages.rst for
2091 * details.
2092 */
2093 if (flags & FOLL_PIN) {
2094 ret = arch_make_page_accessible(page);
2095 if (ret) {
2096 unpin_user_page(page);
2097 goto pte_unmap;
2098 }
2099 }
e9348053 2100 SetPageReferenced(page);
2667f50e
SC
2101 pages[*nr] = page;
2102 (*nr)++;
2103
2104 } while (ptep++, addr += PAGE_SIZE, addr != end);
2105
2106 ret = 1;
2107
2108pte_unmap:
832d7aa0
CH
2109 if (pgmap)
2110 put_dev_pagemap(pgmap);
2667f50e
SC
2111 pte_unmap(ptem);
2112 return ret;
2113}
2114#else
2115
2116/*
2117 * If we can't determine whether or not a pte is special, then fail immediately
2118 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2119 * to be special.
2120 *
2121 * For a futex to be placed on a THP tail page, get_futex_key requires a
dadbb612 2122 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2667f50e
SC
2123 * useful to have gup_huge_pmd even if we can't operate on ptes.
2124 */
2125static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
b798bec4 2126 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2127{
2128 return 0;
2129}
3010a5ea 2130#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2667f50e 2131
17596731 2132#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
b59f65fa 2133static int __gup_device_huge(unsigned long pfn, unsigned long addr,
86dfbed4
JH
2134 unsigned long end, unsigned int flags,
2135 struct page **pages, int *nr)
b59f65fa
KS
2136{
2137 int nr_start = *nr;
2138 struct dev_pagemap *pgmap = NULL;
2139
2140 do {
2141 struct page *page = pfn_to_page(pfn);
2142
2143 pgmap = get_dev_pagemap(pfn, pgmap);
2144 if (unlikely(!pgmap)) {
3b78d834 2145 undo_dev_pagemap(nr, nr_start, flags, pages);
b59f65fa
KS
2146 return 0;
2147 }
2148 SetPageReferenced(page);
2149 pages[*nr] = page;
3faa52c0
JH
2150 if (unlikely(!try_grab_page(page, flags))) {
2151 undo_dev_pagemap(nr, nr_start, flags, pages);
2152 return 0;
2153 }
b59f65fa
KS
2154 (*nr)++;
2155 pfn++;
2156 } while (addr += PAGE_SIZE, addr != end);
832d7aa0
CH
2157
2158 if (pgmap)
2159 put_dev_pagemap(pgmap);
b59f65fa
KS
2160 return 1;
2161}
2162
a9b6de77 2163static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2164 unsigned long end, unsigned int flags,
2165 struct page **pages, int *nr)
b59f65fa
KS
2166{
2167 unsigned long fault_pfn;
a9b6de77
DW
2168 int nr_start = *nr;
2169
2170 fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
86dfbed4 2171 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2172 return 0;
b59f65fa 2173
a9b6de77 2174 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2175 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2176 return 0;
2177 }
2178 return 1;
b59f65fa
KS
2179}
2180
a9b6de77 2181static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2182 unsigned long end, unsigned int flags,
2183 struct page **pages, int *nr)
b59f65fa
KS
2184{
2185 unsigned long fault_pfn;
a9b6de77
DW
2186 int nr_start = *nr;
2187
2188 fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
86dfbed4 2189 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
a9b6de77 2190 return 0;
b59f65fa 2191
a9b6de77 2192 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2193 undo_dev_pagemap(nr, nr_start, flags, pages);
a9b6de77
DW
2194 return 0;
2195 }
2196 return 1;
b59f65fa
KS
2197}
2198#else
a9b6de77 2199static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
86dfbed4
JH
2200 unsigned long end, unsigned int flags,
2201 struct page **pages, int *nr)
b59f65fa
KS
2202{
2203 BUILD_BUG();
2204 return 0;
2205}
2206
a9b6de77 2207static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2208 unsigned long end, unsigned int flags,
2209 struct page **pages, int *nr)
b59f65fa
KS
2210{
2211 BUILD_BUG();
2212 return 0;
2213}
2214#endif
2215
a43e9820
JH
2216static int record_subpages(struct page *page, unsigned long addr,
2217 unsigned long end, struct page **pages)
2218{
2219 int nr;
2220
2221 for (nr = 0; addr != end; addr += PAGE_SIZE)
2222 pages[nr++] = page++;
2223
2224 return nr;
2225}
2226
cbd34da7
CH
2227#ifdef CONFIG_ARCH_HAS_HUGEPD
2228static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2229 unsigned long sz)
2230{
2231 unsigned long __boundary = (addr + sz) & ~(sz-1);
2232 return (__boundary - 1 < end - 1) ? __boundary : end;
2233}
2234
2235static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
0cd22afd
JH
2236 unsigned long end, unsigned int flags,
2237 struct page **pages, int *nr)
cbd34da7
CH
2238{
2239 unsigned long pte_end;
2240 struct page *head, *page;
2241 pte_t pte;
2242 int refs;
2243
2244 pte_end = (addr + sz) & ~(sz-1);
2245 if (pte_end < end)
2246 end = pte_end;
2247
55ca2263 2248 pte = huge_ptep_get(ptep);
cbd34da7 2249
0cd22afd 2250 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
cbd34da7
CH
2251 return 0;
2252
2253 /* hugepages are never "special" */
2254 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2255
cbd34da7 2256 head = pte_page(pte);
cbd34da7 2257 page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
a43e9820 2258 refs = record_subpages(page, addr, end, pages + *nr);
cbd34da7 2259
3faa52c0 2260 head = try_grab_compound_head(head, refs, flags);
a43e9820 2261 if (!head)
cbd34da7 2262 return 0;
cbd34da7
CH
2263
2264 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
3b78d834 2265 put_compound_head(head, refs, flags);
cbd34da7
CH
2266 return 0;
2267 }
2268
a43e9820 2269 *nr += refs;
520b4a44 2270 SetPageReferenced(head);
cbd34da7
CH
2271 return 1;
2272}
2273
2274static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2275 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2276 struct page **pages, int *nr)
2277{
2278 pte_t *ptep;
2279 unsigned long sz = 1UL << hugepd_shift(hugepd);
2280 unsigned long next;
2281
2282 ptep = hugepte_offset(hugepd, addr, pdshift);
2283 do {
2284 next = hugepte_addr_end(addr, end, sz);
0cd22afd 2285 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
cbd34da7
CH
2286 return 0;
2287 } while (ptep++, addr = next, addr != end);
2288
2289 return 1;
2290}
2291#else
2292static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
0cd22afd 2293 unsigned int pdshift, unsigned long end, unsigned int flags,
cbd34da7
CH
2294 struct page **pages, int *nr)
2295{
2296 return 0;
2297}
2298#endif /* CONFIG_ARCH_HAS_HUGEPD */
2299
2667f50e 2300static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
0cd22afd
JH
2301 unsigned long end, unsigned int flags,
2302 struct page **pages, int *nr)
2667f50e 2303{
ddc58f27 2304 struct page *head, *page;
2667f50e
SC
2305 int refs;
2306
b798bec4 2307 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2308 return 0;
2309
7af75561
IW
2310 if (pmd_devmap(orig)) {
2311 if (unlikely(flags & FOLL_LONGTERM))
2312 return 0;
86dfbed4
JH
2313 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2314 pages, nr);
7af75561 2315 }
b59f65fa 2316
d63206ee 2317 page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
a43e9820 2318 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2319
3faa52c0 2320 head = try_grab_compound_head(pmd_page(orig), refs, flags);
a43e9820 2321 if (!head)
2667f50e 2322 return 0;
2667f50e
SC
2323
2324 if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3b78d834 2325 put_compound_head(head, refs, flags);
2667f50e
SC
2326 return 0;
2327 }
2328
a43e9820 2329 *nr += refs;
e9348053 2330 SetPageReferenced(head);
2667f50e
SC
2331 return 1;
2332}
2333
2334static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
86dfbed4
JH
2335 unsigned long end, unsigned int flags,
2336 struct page **pages, int *nr)
2667f50e 2337{
ddc58f27 2338 struct page *head, *page;
2667f50e
SC
2339 int refs;
2340
b798bec4 2341 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2667f50e
SC
2342 return 0;
2343
7af75561
IW
2344 if (pud_devmap(orig)) {
2345 if (unlikely(flags & FOLL_LONGTERM))
2346 return 0;
86dfbed4
JH
2347 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2348 pages, nr);
7af75561 2349 }
b59f65fa 2350
d63206ee 2351 page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
a43e9820 2352 refs = record_subpages(page, addr, end, pages + *nr);
2667f50e 2353
3faa52c0 2354 head = try_grab_compound_head(pud_page(orig), refs, flags);
a43e9820 2355 if (!head)
2667f50e 2356 return 0;
2667f50e
SC
2357
2358 if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3b78d834 2359 put_compound_head(head, refs, flags);
2667f50e
SC
2360 return 0;
2361 }
2362
a43e9820 2363 *nr += refs;
e9348053 2364 SetPageReferenced(head);
2667f50e
SC
2365 return 1;
2366}
2367
f30c59e9 2368static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
b798bec4 2369 unsigned long end, unsigned int flags,
f30c59e9
AK
2370 struct page **pages, int *nr)
2371{
2372 int refs;
ddc58f27 2373 struct page *head, *page;
f30c59e9 2374
b798bec4 2375 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
f30c59e9
AK
2376 return 0;
2377
b59f65fa 2378 BUILD_BUG_ON(pgd_devmap(orig));
a43e9820 2379
d63206ee 2380 page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
a43e9820 2381 refs = record_subpages(page, addr, end, pages + *nr);
f30c59e9 2382
3faa52c0 2383 head = try_grab_compound_head(pgd_page(orig), refs, flags);
a43e9820 2384 if (!head)
f30c59e9 2385 return 0;
f30c59e9
AK
2386
2387 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
3b78d834 2388 put_compound_head(head, refs, flags);
f30c59e9
AK
2389 return 0;
2390 }
2391
a43e9820 2392 *nr += refs;
e9348053 2393 SetPageReferenced(head);
f30c59e9
AK
2394 return 1;
2395}
2396
d3f7b1bb 2397static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
b798bec4 2398 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2399{
2400 unsigned long next;
2401 pmd_t *pmdp;
2402
d3f7b1bb 2403 pmdp = pmd_offset_lockless(pudp, pud, addr);
2667f50e 2404 do {
38c5ce93 2405 pmd_t pmd = READ_ONCE(*pmdp);
2667f50e
SC
2406
2407 next = pmd_addr_end(addr, end);
84c3fc4e 2408 if (!pmd_present(pmd))
2667f50e
SC
2409 return 0;
2410
414fd080
YZ
2411 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2412 pmd_devmap(pmd))) {
2667f50e
SC
2413 /*
2414 * NUMA hinting faults need to be handled in the GUP
2415 * slowpath for accounting purposes and so that they
2416 * can be serialised against THP migration.
2417 */
8a0516ed 2418 if (pmd_protnone(pmd))
2667f50e
SC
2419 return 0;
2420
b798bec4 2421 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2667f50e
SC
2422 pages, nr))
2423 return 0;
2424
f30c59e9
AK
2425 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2426 /*
 2427 * architectures may use a different format for hugetlbfs
 2428 * pmds than for THP pmds
2429 */
2430 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
b798bec4 2431 PMD_SHIFT, next, flags, pages, nr))
f30c59e9 2432 return 0;
b798bec4 2433 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
2923117b 2434 return 0;
2667f50e
SC
2435 } while (pmdp++, addr = next, addr != end);
2436
2437 return 1;
2438}
2439
d3f7b1bb 2440static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
b798bec4 2441 unsigned int flags, struct page **pages, int *nr)
2667f50e
SC
2442{
2443 unsigned long next;
2444 pud_t *pudp;
2445
d3f7b1bb 2446 pudp = pud_offset_lockless(p4dp, p4d, addr);
2667f50e 2447 do {
e37c6982 2448 pud_t pud = READ_ONCE(*pudp);
2667f50e
SC
2449
2450 next = pud_addr_end(addr, end);
15494520 2451 if (unlikely(!pud_present(pud)))
2667f50e 2452 return 0;
f30c59e9 2453 if (unlikely(pud_huge(pud))) {
b798bec4 2454 if (!gup_huge_pud(pud, pudp, addr, next, flags,
f30c59e9
AK
2455 pages, nr))
2456 return 0;
2457 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2458 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
b798bec4 2459 PUD_SHIFT, next, flags, pages, nr))
2667f50e 2460 return 0;
d3f7b1bb 2461 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2667f50e
SC
2462 return 0;
2463 } while (pudp++, addr = next, addr != end);
2464
2465 return 1;
2466}
2467
d3f7b1bb 2468static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
b798bec4 2469 unsigned int flags, struct page **pages, int *nr)
c2febafc
KS
2470{
2471 unsigned long next;
2472 p4d_t *p4dp;
2473
d3f7b1bb 2474 p4dp = p4d_offset_lockless(pgdp, pgd, addr);
c2febafc
KS
2475 do {
2476 p4d_t p4d = READ_ONCE(*p4dp);
2477
2478 next = p4d_addr_end(addr, end);
2479 if (p4d_none(p4d))
2480 return 0;
2481 BUILD_BUG_ON(p4d_huge(p4d));
2482 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2483 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
b798bec4 2484 P4D_SHIFT, next, flags, pages, nr))
c2febafc 2485 return 0;
d3f7b1bb 2486 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
c2febafc
KS
2487 return 0;
2488 } while (p4dp++, addr = next, addr != end);
2489
2490 return 1;
2491}
2492
5b65c467 2493static void gup_pgd_range(unsigned long addr, unsigned long end,
b798bec4 2494 unsigned int flags, struct page **pages, int *nr)
5b65c467
KS
2495{
2496 unsigned long next;
2497 pgd_t *pgdp;
2498
2499 pgdp = pgd_offset(current->mm, addr);
2500 do {
2501 pgd_t pgd = READ_ONCE(*pgdp);
2502
2503 next = pgd_addr_end(addr, end);
2504 if (pgd_none(pgd))
2505 return;
2506 if (unlikely(pgd_huge(pgd))) {
b798bec4 2507 if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
5b65c467
KS
2508 pages, nr))
2509 return;
2510 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2511 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
b798bec4 2512 PGDIR_SHIFT, next, flags, pages, nr))
5b65c467 2513 return;
d3f7b1bb 2514 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
5b65c467
KS
2515 return;
2516 } while (pgdp++, addr = next, addr != end);
2517}
050a9adc
CH
2518#else
2519static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2520 unsigned int flags, struct page **pages, int *nr)
2521{
2522}
2523#endif /* CONFIG_HAVE_FAST_GUP */
5b65c467
KS
2524
2525#ifndef gup_fast_permitted
2526/*
dadbb612 2527 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
5b65c467
KS
2528 * we need to fall back to the slow version:
2529 */
26f4c328 2530static bool gup_fast_permitted(unsigned long start, unsigned long end)
5b65c467 2531{
26f4c328 2532 return true;
5b65c467
KS
2533}
2534#endif
2535
7af75561
IW
2536static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2537 unsigned int gup_flags, struct page **pages)
2538{
2539 int ret;
2540
2541 /*
2542 * FIXME: FOLL_LONGTERM does not work with
2543 * get_user_pages_unlocked() (see comments in that function)
2544 */
2545 if (gup_flags & FOLL_LONGTERM) {
d8ed45c5 2546 mmap_read_lock(current->mm);
64019a2e 2547 ret = __gup_longterm_locked(current->mm,
7af75561
IW
2548 start, nr_pages,
2549 pages, NULL, gup_flags);
d8ed45c5 2550 mmap_read_unlock(current->mm);
7af75561
IW
2551 } else {
2552 ret = get_user_pages_unlocked(start, nr_pages,
2553 pages, gup_flags);
2554 }
2555
2556 return ret;
2557}
2558
c28b1fc7
JG
2559static unsigned long lockless_pages_from_mm(unsigned long start,
2560 unsigned long end,
2561 unsigned int gup_flags,
2562 struct page **pages)
2563{
2564 unsigned long flags;
2565 int nr_pinned = 0;
57efa1fe 2566 unsigned seq;
c28b1fc7
JG
2567
2568 if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2569 !gup_fast_permitted(start, end))
2570 return 0;
2571
57efa1fe
JG
2572 if (gup_flags & FOLL_PIN) {
2573 seq = raw_read_seqcount(&current->mm->write_protect_seq);
2574 if (seq & 1)
2575 return 0;
2576 }
2577
c28b1fc7
JG
2578 /*
2579 * Disable interrupts. The nested form is used, in order to allow full,
2580 * general purpose use of this routine.
2581 *
2582 * With interrupts disabled, we block page table pages from being freed
2583 * from under us. See struct mmu_table_batch comments in
2584 * include/asm-generic/tlb.h for more details.
2585 *
2586 * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2587 * that come from THPs splitting.
2588 */
2589 local_irq_save(flags);
2590 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2591 local_irq_restore(flags);
57efa1fe
JG
2592
2593 /*
2594 * When pinning pages for DMA there could be a concurrent write protect
2595 * from fork() via copy_page_range(), in this case always fail fast GUP.
2596 */
2597 if (gup_flags & FOLL_PIN) {
2598 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2599 unpin_user_pages(pages, nr_pinned);
2600 return 0;
2601 }
2602 }
c28b1fc7
JG
2603 return nr_pinned;
2604}
2605
2606static int internal_get_user_pages_fast(unsigned long start,
2607 unsigned long nr_pages,
eddb1c22
JH
2608 unsigned int gup_flags,
2609 struct page **pages)
2667f50e 2610{
c28b1fc7
JG
2611 unsigned long len, end;
2612 unsigned long nr_pinned;
2613 int ret;
2667f50e 2614
f4000fdf 2615 if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
376a34ef
JH
2616 FOLL_FORCE | FOLL_PIN | FOLL_GET |
2617 FOLL_FAST_ONLY)))
817be129
CH
2618 return -EINVAL;
2619
008cfe44
PX
2620 if (gup_flags & FOLL_PIN)
2621 atomic_set(&current->mm->has_pinned, 1);
2622
f81cd178 2623 if (!(gup_flags & FOLL_FAST_ONLY))
da1c55f1 2624 might_lock_read(&current->mm->mmap_lock);
f81cd178 2625
f455c854 2626 start = untagged_addr(start) & PAGE_MASK;
c28b1fc7
JG
2627 len = nr_pages << PAGE_SHIFT;
2628 if (check_add_overflow(start, len, &end))
c61611f7 2629 return 0;
96d4f267 2630 if (unlikely(!access_ok((void __user *)start, len)))
c61611f7 2631 return -EFAULT;
73e10a61 2632
c28b1fc7
JG
2633 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2634 if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2635 return nr_pinned;
2667f50e 2636
c28b1fc7
JG
2637 /* Slow path: try to get the remaining pages with get_user_pages */
2638 start += nr_pinned << PAGE_SHIFT;
2639 pages += nr_pinned;
2640 ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2641 pages);
2642 if (ret < 0) {
2643 /*
2644 * The caller has to unpin the pages we already pinned so
2645 * returning -errno is not an option
2646 */
2647 if (nr_pinned)
2648 return nr_pinned;
2649 return ret;
2667f50e 2650 }
c28b1fc7 2651 return ret + nr_pinned;
2667f50e 2652}
c28b1fc7 2653
dadbb612
SJ
2654/**
2655 * get_user_pages_fast_only() - pin user pages in memory
2656 * @start: starting user address
2657 * @nr_pages: number of pages from start to pin
2658 * @gup_flags: flags modifying pin behaviour
2659 * @pages: array that receives pointers to the pages pinned.
2660 * Should be at least nr_pages long.
2661 *
9e1f0580
JH
2662 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2663 * the regular GUP.
 2664 * Note a difference from get_user_pages_fast(): this always returns the
 2665 * number of pages pinned, or 0 if no pages were pinned.
2666 *
2667 * If the architecture does not support this function, simply return with no
2668 * pages pinned.
2669 *
2670 * Careful, careful! COW breaking can go either way, so a non-write
2671 * access can get ambiguous page results. If you call this function without
2672 * 'write' set, you'd better be sure that you're ok with that ambiguity.
2673 */
dadbb612
SJ
2674int get_user_pages_fast_only(unsigned long start, int nr_pages,
2675 unsigned int gup_flags, struct page **pages)
9e1f0580 2676{
376a34ef 2677 int nr_pinned;
9e1f0580
JH
2678 /*
2679 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2680 * because gup fast is always a "pin with a +1 page refcount" request.
376a34ef
JH
2681 *
2682 * FOLL_FAST_ONLY is required in order to match the API description of
2683 * this routine: no fall back to regular ("slow") GUP.
9e1f0580 2684 */
dadbb612 2685 gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
9e1f0580 2686
376a34ef
JH
2687 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2688 pages);
9e1f0580
JH
2689
2690 /*
376a34ef
JH
2691 * As specified in the API description above, this routine is not
2692 * allowed to return negative values. However, the common core
2693 * routine internal_get_user_pages_fast() *can* return -errno.
2694 * Therefore, correct for that here:
9e1f0580 2695 */
376a34ef
JH
2696 if (nr_pinned < 0)
2697 nr_pinned = 0;
9e1f0580
JH
2698
2699 return nr_pinned;
2700}
dadbb612 2701EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
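/*
 * Illustrative sketch only (hypothetical helper): an opportunistic lookup
 * in the style of get_futex_key(). Because FOLL_FAST_ONLY is implied, no
 * fallback to the slow path happens, so the caller must be prepared for a
 * 0 return and retry later from a context where faulting is allowed.
 */
static int __maybe_unused example_fast_only_lookup(unsigned long addr,
						   struct page **page)
{
	/* Safe even with pagefaults/interrupts disabled. */
	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, page) != 1)
		return -EAGAIN;	/* caller retries via a sleeping path */

	return 0;		/* caller must put_page(*page) when done */
}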
9e1f0580 2702
eddb1c22
JH
2703/**
2704 * get_user_pages_fast() - pin user pages in memory
3faa52c0
JH
2705 * @start: starting user address
2706 * @nr_pages: number of pages from start to pin
2707 * @gup_flags: flags modifying pin behaviour
2708 * @pages: array that receives pointers to the pages pinned.
2709 * Should be at least nr_pages long.
eddb1c22 2710 *
c1e8d7c6 2711 * Attempt to pin user pages in memory without taking mm->mmap_lock.
eddb1c22
JH
2712 * If not successful, it will fall back to taking the lock and
2713 * calling get_user_pages().
2714 *
2715 * Returns number of pages pinned. This may be fewer than the number requested.
2716 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2717 * -errno.
2718 */
2719int get_user_pages_fast(unsigned long start, int nr_pages,
2720 unsigned int gup_flags, struct page **pages)
2721{
447f3e45 2722 if (!is_valid_gup_flags(gup_flags))
eddb1c22
JH
2723 return -EINVAL;
2724
94202f12
JH
2725 /*
2726 * The caller may or may not have explicitly set FOLL_GET; either way is
2727 * OK. However, internally (within mm/gup.c), gup fast variants must set
2728 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2729 * request.
2730 */
2731 gup_flags |= FOLL_GET;
eddb1c22
JH
2732 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2733}
050a9adc 2734EXPORT_SYMBOL_GPL(get_user_pages_fast);
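/*
 * Illustrative sketch only (hypothetical helper): get_user_pages_fast()
 * needs no mmap_lock from the caller and may pin fewer pages than
 * requested; the caller must put_page() exactly the number returned.
 */
static int __maybe_unused example_gup_fast(unsigned long start, int nr_pages,
					   struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;	/* -errno: nothing was pinned */

	/* ... read or write pages[0..pinned - 1] here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned;
}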
eddb1c22
JH
2735
2736/**
2737 * pin_user_pages_fast() - pin user pages in memory without taking locks
2738 *
3faa52c0
JH
2739 * @start: starting user address
2740 * @nr_pages: number of pages from start to pin
2741 * @gup_flags: flags modifying pin behaviour
2742 * @pages: array that receives pointers to the pages pinned.
2743 * Should be at least nr_pages long.
2744 *
2745 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2746 * get_user_pages_fast() for documentation on the function arguments, because
2747 * the arguments here are identical.
2748 *
2749 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2750 * see Documentation/core-api/pin_user_pages.rst for further details.
eddb1c22
JH
2751 */
2752int pin_user_pages_fast(unsigned long start, int nr_pages,
2753 unsigned int gup_flags, struct page **pages)
2754{
3faa52c0
JH
2755 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2756 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2757 return -EINVAL;
2758
2759 gup_flags |= FOLL_PIN;
2760 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
eddb1c22
JH
2761}
2762EXPORT_SYMBOL_GPL(pin_user_pages_fast);
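/*
 * Illustrative sketch only (hypothetical helper): a Direct IO style user of
 * pin_user_pages_fast(). Pages pinned with FOLL_PIN must be released with
 * the unpin_user_page*() family, never with put_page().
 */
static int __maybe_unused example_pin_for_dma(unsigned long start,
					      int nr_pages,
					      struct page **pages)
{
	int pinned;

	pinned = pin_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned;

	/* ... set up and complete DMA into the pinned pages here ... */

	/* Mark the pages dirty and drop the FOLL_PIN references. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return pinned;
}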
2763
104acc32 2764/*
dadbb612
SJ
2765 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2766 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
104acc32
JH
2767 *
2768 * The API rules are the same, too: no negative values may be returned.
2769 */
2770int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2771 unsigned int gup_flags, struct page **pages)
2772{
2773 int nr_pinned;
2774
2775 /*
2776 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2777 * rules require returning 0, rather than -errno:
2778 */
2779 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2780 return 0;
2781 /*
2782 * FOLL_FAST_ONLY is required in order to match the API description of
2783 * this routine: no fall back to regular ("slow") GUP.
2784 */
2785 gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2786 nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2787 pages);
2788 /*
2789 * This routine is not allowed to return negative values. However,
2790 * internal_get_user_pages_fast() *can* return -errno. Therefore,
2791 * correct for that here:
2792 */
2793 if (nr_pinned < 0)
2794 nr_pinned = 0;
2795
2796 return nr_pinned;
2797}
2798EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2799
eddb1c22 2800/**
64019a2e 2801 * pin_user_pages_remote() - pin pages of a remote process
eddb1c22 2802 *
3faa52c0
JH
2803 * @mm: mm_struct of target mm
2804 * @start: starting user address
2805 * @nr_pages: number of pages from start to pin
2806 * @gup_flags: flags modifying lookup behaviour
2807 * @pages: array that receives pointers to the pages pinned.
2808 * Should be at least nr_pages long. Or NULL, if caller
2809 * only intends to ensure the pages are faulted in.
2810 * @vmas: array of pointers to vmas corresponding to each page.
2811 * Or NULL if the caller does not require them.
2812 * @locked: pointer to lock flag indicating whether lock is held and
2813 * subsequently whether VM_FAULT_RETRY functionality can be
2814 * utilised. Lock must initially be held.
2815 *
2816 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2817 * get_user_pages_remote() for documentation on the function arguments, because
2818 * the arguments here are identical.
2819 *
2820 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2821 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22 2822 */
64019a2e 2823long pin_user_pages_remote(struct mm_struct *mm,
eddb1c22
JH
2824 unsigned long start, unsigned long nr_pages,
2825 unsigned int gup_flags, struct page **pages,
2826 struct vm_area_struct **vmas, int *locked)
2827{
3faa52c0
JH
2828 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2829 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2830 return -EINVAL;
2831
2832 gup_flags |= FOLL_PIN;
64019a2e 2833 return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
3faa52c0 2834 pages, vmas, locked);
eddb1c22
JH
2835}
2836EXPORT_SYMBOL(pin_user_pages_remote);
2837
2838/**
2839 * pin_user_pages() - pin user pages in memory for use by other devices
2840 *
3faa52c0
JH
2841 * @start: starting user address
2842 * @nr_pages: number of pages from start to pin
2843 * @gup_flags: flags modifying lookup behaviour
2844 * @pages: array that receives pointers to the pages pinned.
2845 * Should be at least nr_pages long. Or NULL, if caller
2846 * only intends to ensure the pages are faulted in.
2847 * @vmas: array of pointers to vmas corresponding to each page.
2848 * Or NULL if the caller does not require them.
2849 *
2850 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2851 * FOLL_PIN is set.
2852 *
2853 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
72ef5e52 2854 * see Documentation/core-api/pin_user_pages.rst for details.
eddb1c22
JH
2855 */
2856long pin_user_pages(unsigned long start, unsigned long nr_pages,
2857 unsigned int gup_flags, struct page **pages,
2858 struct vm_area_struct **vmas)
2859{
3faa52c0
JH
2860 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2861 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2862 return -EINVAL;
2863
2864 gup_flags |= FOLL_PIN;
64019a2e 2865 return __gup_longterm_locked(current->mm, start, nr_pages,
3faa52c0 2866 pages, vmas, gup_flags);
eddb1c22
JH
2867}
2868EXPORT_SYMBOL(pin_user_pages);
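/*
 * Illustrative sketch only (hypothetical helper): pin_user_pages() follows
 * the same locking rules as get_user_pages() - the caller holds mmap_lock -
 * but the resulting pins must be dropped via unpin_user_pages(). With
 * FOLL_LONGTERM, unpinnable pages are migrated out of the way first.
 */
static long __maybe_unused example_pin_longterm(unsigned long start,
						unsigned long nr_pages,
						struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = pin_user_pages(start, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				pages, NULL);
	mmap_read_unlock(current->mm);

	if (pinned <= 0)
		return pinned;

	/* ... long-lived use (e.g. RDMA memory registration) goes here ... */

	unpin_user_pages(pages, pinned);
	return pinned;
}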
91429023
JH
2869
2870/*
2871 * pin_user_pages_unlocked() is the FOLL_PIN variant of
2872 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2873 * FOLL_PIN and rejects FOLL_GET.
2874 */
2875long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2876 struct page **pages, unsigned int gup_flags)
2877{
2878 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2879 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2880 return -EINVAL;
2881
2882 gup_flags |= FOLL_PIN;
2883 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2884}
2885EXPORT_SYMBOL(pin_user_pages_unlocked);
420c2091
JH
2886
2887/*
2888 * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
2889 * Behavior is the same, except that this one sets FOLL_PIN and rejects
2890 * FOLL_GET.
2891 */
2892long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
2893 unsigned int gup_flags, struct page **pages,
2894 int *locked)
2895{
2896 /*
2897 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
2898 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2899 * vmas. As there are no users of this flag in this call we simply
2900 * disallow this option for now.
2901 */
2902 if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2903 return -EINVAL;
2904
2905 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2906 if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2907 return -EINVAL;
2908
2909 gup_flags |= FOLL_PIN;
64019a2e 2910 return __get_user_pages_locked(current->mm, start, nr_pages,
420c2091
JH
2911 pages, NULL, locked,
2912 gup_flags | FOLL_TOUCH);
2913}
2914EXPORT_SYMBOL(pin_user_pages_locked);