mm/gup: handle hugetlb for no_page_table()
author Peter Xu <peterx@redhat.com>
Wed, 27 Mar 2024 15:23:27 +0000 (11:23 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:22 +0000 (20:56 -0700)
no_page_table() is not yet used for hugetlb code paths.  Prepare it for that.

The major difference here is that hugetlb will return -EFAULT as long as
the page cache does not exist, even for VM_SHARED mappings.  See
hugetlb_follow_page_mask().
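
For reference, that page cache check boils down to a folio lookup in the
hugetlbfs mapping at the huge-page-aligned index.  A simplified sketch of
hugetlbfs_pagecache_present() (details may differ from the exact
mm/hugetlb.c implementation on a given kernel):

	/* Sketch: does the hugetlbfs page cache hold a folio at @address? */
	bool hugetlbfs_pagecache_present(struct hstate *h,
					 struct vm_area_struct *vma,
					 unsigned long address)
	{
		struct address_space *mapping = vma->vm_file->f_mapping;
		/* index is in units of huge pages, not base pages */
		pgoff_t idx = linear_page_index(vma, address) >>
			      huge_page_order(h);
		struct folio *folio = filemap_get_folio(mapping, idx);

		if (IS_ERR(folio))
			return false;	/* no cache: no_page_table() -> -EFAULT */
		folio_put(folio);
		return true;
	}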

Pass "address" into no_page_table() too, as hugetlb will need it.

Link: https://lkml.kernel.org/r/20240327152332.950956-9-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Aneesh Kumar K.V (IBM) <aneesh.kumar@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/gup.c

index e83e262ea8e95cb520b091463803c62ea1c366cd..1a1e459fd1382e87e49e0d4e7d76fd808753ec99 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -501,19 +501,27 @@ static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
 
 #ifdef CONFIG_MMU
 static struct page *no_page_table(struct vm_area_struct *vma,
-               unsigned int flags)
+                                 unsigned int flags, unsigned long address)
 {
+       if (!(flags & FOLL_DUMP))
+               return NULL;
+
        /*
-        * When core dumping an enormous anonymous area that nobody
-        * has touched so far, we don't want to allocate unnecessary pages or
+        * When core dumping, we don't want to allocate unnecessary pages or
         * page tables.  Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
-       if ((flags & FOLL_DUMP) &&
-                       (vma_is_anonymous(vma) || !vma->vm_ops->fault))
+       if (is_vm_hugetlb_page(vma)) {
+               struct hstate *h = hstate_vma(vma);
+
+               if (!hugetlbfs_pagecache_present(h, vma, address))
+                       return ERR_PTR(-EFAULT);
+       } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
                return ERR_PTR(-EFAULT);
+       }
+
        return NULL;
 }
 
@@ -593,7 +601,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        pte = ptep_get(ptep);
        if (!pte_present(pte))
                goto no_page;
@@ -685,7 +693,7 @@ no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return NULL;
-       return no_page_table(vma, flags);
+       return no_page_table(vma, flags, address);
 }
 
 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
@@ -701,27 +709,27 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
        pmd = pmd_offset(pudp, address);
        pmdval = pmdp_get_lockless(pmd);
        if (pmd_none(pmdval))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        if (!pmd_present(pmdval))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
        if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
 
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
@@ -752,17 +760,17 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 
        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        }
        if (unlikely(pud_bad(*pud)))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
 
        return follow_pmd_mask(vma, address, pud, flags, ctx);
 }
@@ -777,10 +785,10 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
        p4dp = p4d_offset(pgdp, address);
        p4d = READ_ONCE(*p4dp);
        if (!p4d_present(p4d))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
        BUILD_BUG_ON(p4d_leaf(p4d));
        if (unlikely(p4d_bad(p4d)))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
 
        return follow_pud_mask(vma, address, p4dp, flags, ctx);
 }
@@ -830,7 +838,7 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
        pgd = pgd_offset(mm, address);
 
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return no_page_table(vma, flags);
+               return no_page_table(vma, flags, address);
 
        return follow_p4d_mask(vma, address, pgd, flags, ctx);
 }