mm/gup: handle huge pud for follow_pud_mask()
diff --git a/mm/gup.c b/mm/gup.c
index 39224b5fe62f172fa3d3e709bfb1ed7f281ac678..2b06d59f2fa3b0c5c03b3709fbcef921132af28a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -525,6 +525,70 @@ static struct page *no_page_table(struct vm_area_struct *vma,
        return NULL;
 }
 
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+static struct page *follow_huge_pud(struct vm_area_struct *vma,
+                                   unsigned long addr, pud_t *pudp,
+                                   int flags, struct follow_page_context *ctx)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       struct page *page;
+       pud_t pud = *pudp;
+       unsigned long pfn = pud_pfn(pud);
+       int ret;
+
+       assert_spin_locked(pud_lockptr(mm, pudp));
+
+       if ((flags & FOLL_WRITE) && !pud_write(pud))
+               return NULL;
+
+       if (!pud_present(pud))
+               return NULL;
+
+       pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
+
+       if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
+           pud_devmap(pud)) {
+               /*
+                * device mapped pages can only be returned if the caller
+                * will manage the page reference count.
+                *
+                * At least one of FOLL_GET | FOLL_PIN must be set, so
+                * assert that here:
+                */
+               if (!(flags & (FOLL_GET | FOLL_PIN)))
+                       return ERR_PTR(-EEXIST);
+
+               if (flags & FOLL_TOUCH)
+                       touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
+
+               ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
+               if (!ctx->pgmap)
+                       return ERR_PTR(-EFAULT);
+       }
+
+       page = pfn_to_page(pfn);
+
+       if (!pud_devmap(pud) && !pud_write(pud) &&
+           gup_must_unshare(vma, flags, page))
+               return ERR_PTR(-EMLINK);
+
+       ret = try_grab_page(page, flags);
+       if (ret)
+               page = ERR_PTR(ret);
+       else
+               ctx->page_mask = HPAGE_PUD_NR - 1;
+
+       return page;
+}
+#else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
+static struct page *follow_huge_pud(struct vm_area_struct *vma,
+                                   unsigned long addr, pud_t *pudp,
+                                   int flags, struct follow_page_context *ctx)
+{
+       return NULL;
+}
+#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
+
 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, unsigned int flags)
 {
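
For orientation, here is a minimal standalone sketch of the subpage arithmetic
in follow_huge_pud() above, i.e. the "pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT"
step. The PAGE_SHIFT/PUD_SHIFT values below are illustrative (x86-64 with 4K
pages and 1G PUD leaves), not part of this patch; the kernel takes them from
the arch headers.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PUD_SHIFT	30			/* 1G leaf */
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	/* pfn of the first base page covered by the 1G leaf */
	unsigned long leaf_pfn = 0x100000;
	/* an address inside the leaf: subpage index 5 */
	unsigned long addr = (3UL << PUD_SHIFT) + (5UL << PAGE_SHIFT);

	/* same expression as in follow_huge_pud() */
	unsigned long pfn = leaf_pfn + ((addr & ~PUD_MASK) >> PAGE_SHIFT);

	printf("subpage index %lu -> pfn %#lx\n",
	       (addr & ~PUD_MASK) >> PAGE_SHIFT, pfn);
	return 0;
}

Because the pfn is adjusted before the devmap branch, get_dev_pagemap() sees
the exact subpage's pfn, and pfn_to_page() later returns that subpage rather
than the first page of the leaf.
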
@@ -760,11 +824,11 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 
        pudp = pud_offset(p4dp, address);
        pud = READ_ONCE(*pudp);
-       if (pud_none(pud))
+       if (!pud_present(pud))
                return no_page_table(vma, flags, address);
-       if (pud_devmap(pud)) {
+       if (pud_leaf(pud)) {
                ptl = pud_lock(mm, pudp);
-               page = follow_devmap_pud(vma, address, pudp, flags, &ctx->pgmap);
+               page = follow_huge_pud(vma, address, pudp, flags, ctx);
                spin_unlock(ptl);
                if (page)
                        return page;
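
The dispatch above now keys on pud_leaf() instead of pud_devmap(), so any
present huge pud (devmap or otherwise) is handled by follow_huge_pud() under
the pud lock, and ctx->page_mask tells the caller how many base pages the
leaf spans. Below is a simplified sketch, not the exact kernel code, of how
a GUP loop can use that mask to batch subpages instead of re-walking the page
tables once per base page; the 1G/4K geometry is again an illustrative
assumption.

#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_PUD_NR	(1UL << (30 - PAGE_SHIFT))	/* subpages in a 1G leaf */

int main(void)
{
	unsigned long page_mask = HPAGE_PUD_NR - 1;	/* as set by follow_huge_pud() */
	unsigned long start = 0xc0005000UL;		/* address being resolved */

	/*
	 * Number of base pages from 'start' to the end of the current
	 * leaf; the same idea drives the batching in __get_user_pages().
	 */
	unsigned long page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);

	printf("%lu subpages consumable without another walk\n", page_increm);
	return 0;
}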