mm: gup: add get_user_pages_locked and get_user_pages_unlocked
mm/gup.c

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}

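/*
 * Worked example of the *page_mask contract above (illustrative only, not
 * part of the interface): when the returned page is part of a huge page
 * mapped at pmd level, *page_mask is set to HPAGE_PMD_NR - 1.  With 4K
 * base pages and 2M THPs, HPAGE_PMD_NR is 512, so a caller such as
 * __get_user_pages() can advance by
 *
 *	page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 *
 * pages at once.  If start points 5 base pages into the THP, that is
 * 1 + (511 - 5) = 507 pages, i.e. the walk jumps straight to the next
 * huge page boundary instead of revisiting every tail page.
 */
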
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mlock, just skip the stack guard page. */
	if ((*flags & FOLL_MLOCK) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);

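/*
 * Minimal sketch (hypothetical caller, not part of this file) of the
 * @nonblocking protocol documented above, without FOLL_NOWAIT:
 *
 *	int locked = 1;
 *	struct page *page;
 *	long ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = __get_user_pages(tsk, mm, addr, 1, FOLL_TOUCH | FOLL_GET,
 *			       &page, NULL, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 *
 * If locked came back 0, the fault path already dropped mmap_sem via
 * up_read(); the caller must not drop it again and must re-take it before
 * dereferencing any vma pointers.
 */
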
/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where, for locking
 * reasons, we try to access user memory in atomic context (within a
 * pagefault_disable() section), that access returns -EFAULT, and we want
 * to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}

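/*
 * Illustrative sketch (hypothetical caller, modelled on the futex code)
 * of the scenario described above: an access under pagefault_disable()
 * fails, the fault is resolved explicitly, and the access is retried.
 *
 *	pagefault_disable();
 *	ret = __copy_to_user_inatomic(uaddr, &val, sizeof(val));
 *	pagefault_enable();
 *	if (ret) {
 *		down_read(&mm->mmap_sem);
 *		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE);
 *		up_read(&mm->mmap_sem);
 *		if (!ret)
 *			goto retry;
 *	}
 */
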
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						int write, int force,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop)
{
	int flags = FOLL_TOUCH;
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *	down_read(&mm->mmap_sem);
 *	do_something()
 *	get_user_pages(tsk, mm, ..., pages, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * to:
 *
 *	int locked = 1;
 *	down_read(&mm->mmap_sem);
 *	do_something()
 *	get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *	if (locked)
 *		up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, NULL, locked, true);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *	down_read(&mm->mmap_sem);
 *	get_user_pages(tsk, mm, ..., pages, NULL);
 *	up_read(&mm->mmap_sem);
 *
 * with:
 *
 *	get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead, if the two parameters
 * "tsk" and "mm" are respectively equal to current and current->mm,
 * or if "force" shall be set to 1 (get_user_pages_fast misses the
 * "force" parameter).
 */
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	long ret;
	int locked = 1;
	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				      pages, NULL, &locked, false);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

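/*
 * End-to-end usage sketch (hypothetical caller; names like start, buf and
 * nr_pinned are placeholders) combining the rules above: pin for write,
 * access through the kernel mapping, mark dirty before dropping the
 * reference.
 *
 *	struct page *pages[16];
 *	long nr_pinned, i;
 *
 *	nr_pinned = get_user_pages_unlocked(current, current->mm, start,
 *					    16, 1, 0, pages);
 *	if (nr_pinned < 0)
 *		return nr_pinned;
 *	for (i = 0; i < nr_pinned; i++) {
 *		void *kaddr = kmap(pages[i]);
 *
 *		memcpy(kaddr, buf + i * PAGE_SIZE, PAGE_SIZE);
 *		kunmap(pages[i]);
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *
 * A short pin (nr_pinned < 16) still needs every returned page released
 * with put_page() as above.
 */
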
/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
				       pages, vmas, NULL, false);
}
EXPORT_SYMBOL(get_user_pages);

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
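
/*
 * Sketch of how a coredump writer is expected to consume get_dump_page()
 * (hypothetical, simplified; dump_emit_page and dump_skip_hole stand in
 * for the real coredump helpers):
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			dump_emit_page(cprm, page);
 *			put_page(page);
 *		} else {
 *			dump_skip_hole(cprm, PAGE_SIZE);
 *		}
 *	}
 */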

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page-table-containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free
 * those pages. Disabling interrupts will allow the fast_gup walker to both
 * block the rcu_sched callback, and an IPI that we broadcast for splitting
 * THPs (which is a relatively rare event). The code below adopts this
 * strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *      pages containing page tables.
 *
 *  *) THP splits will broadcast an IPI, this can be achieved by overriding
 *      pmdp_splitting_flush.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_numa(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

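/*
 * A minimal sketch of the per-architecture helper suggested in the comment
 * inside gup_pte_range() above, valid only for an architecture whose ptes
 * fit in a single machine word (anything wider, e.g. 32-bit PAE, needs a
 * retry loop as in arch/x86/mm/gup.c):
 *
 *	static inline pte_t gup_get_pte(pte_t *ptep)
 *	{
 *		return ACCESS_ONCE(*ptep);
 *	}
 */
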
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return. (This allows the THP code to bump their ref count when
	 * they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page, *tail;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can use a different page table
			 * format for hugetlbfs pmds than for THP pmds.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = ACCESS_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

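/*
 * Sketch of the kind of caller __get_user_pages_fast() is meant for
 * (hypothetical, modelled on get_futex_key()): a single page, possibly
 * with interrupts already disabled, where any fallback to the slow path
 * is the caller's decision rather than this function's.
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(address, 1, 1, &page) != 1)
 *		return pin_via_slow_path(address, &page);
 *	...
 *	put_page(page);
 *
 * pin_via_slow_path() is a placeholder for whatever locked fallback the
 * caller uses, e.g. get_user_pages_unlocked().
 */
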
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
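
/*
 * Caller-side sketch (hypothetical) of the "careful with return values"
 * rule above: a fast pin can succeed partially, so both the short-count
 * case and any later error must release whatever was pinned.
 *
 *	nr_pinned = get_user_pages_fast(start, nr, 1, pages);
 *	if (nr_pinned < 0)
 *		return nr_pinned;		(nothing was pinned)
 *	if (nr_pinned < nr) {
 *		while (nr_pinned--)
 *			put_page(pages[nr_pinned]);
 *		return -EFAULT;			(or retry / shorten the I/O)
 *	}
 */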