powerpc/hugetlb: add follow_huge_pd implementation for ppc64
author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
           Thu, 6 Jul 2017 22:38:59 +0000 (15:38 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 6 Jul 2017 23:24:33 +0000 (16:24 -0700)
Link: http://lkml.kernel.org/r/1494926612-23928-8-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Mike Kravetz <kravetz@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
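
For context: follow_huge_pd() is the arch hook that the generic follow_page_mask() walk invokes when it finds a hugepd (huge page directory) entry in place of a normal page-table pointer. The call site lives in the generic GUP code, not in this patch; the sketch below is only an illustration of how such a dispatch could look at the PGD level, assuming the is_hugepd()/__hugepd() helpers and an otherwise normal walk (a minimal sketch, not the actual mm/gup.c code).

/*
 * Illustrative only: dispatch a PGD-level hugepd entry to follow_huge_pd().
 * is_hugepd()/__hugepd() and the surrounding walk are assumed context here;
 * this patch only provides the follow_huge_pd() implementation itself.
 */
static struct page *example_walk_pgd(struct vm_area_struct *vma,
				     unsigned long address, unsigned int flags)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, address);

	if (pgd_none(*pgd))
		return NULL;

	if (is_hugepd(__hugepd(pgd_val(*pgd))))
		/* The whole PGDIR range is mapped by huge PTEs under this entry. */
		return follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)),
				      flags, PGDIR_SHIFT);

	/* ... otherwise descend to the PUD/PMD/PTE levels as usual ... */
	return NULL;
}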
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a4f33de4008eeed9829eb1ceef01e72d7cedfd8f..f5ec043d49df3cfc6c3621ae241ddba39588839c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -17,6 +17,8 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/moduleparam.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -617,6 +619,46 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
        } while (addr = next, addr != end);
 }
 
+/*
+ * 64 bit book3s use generic follow_page_mask
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+                           unsigned long address, hugepd_t hpd,
+                           int flags, int pdshift)
+{
+       pte_t *ptep;
+       spinlock_t *ptl;
+       struct page *page = NULL;
+       unsigned long mask;
+       int shift = hugepd_shift(hpd);
+       struct mm_struct *mm = vma->vm_mm;
+
+retry:
+       ptl = &mm->page_table_lock;
+       spin_lock(ptl);
+
+       ptep = hugepte_offset(hpd, address, pdshift);
+       if (pte_present(*ptep)) {
+               mask = (1UL << shift) - 1;
+               page = pte_page(*ptep);
+               page += ((address & mask) >> PAGE_SHIFT);
+               if (flags & FOLL_GET)
+                       get_page(page);
+       } else {
+               if (is_hugetlb_entry_migration(*ptep)) {
+                       spin_unlock(ptl);
+                       __migration_entry_wait(mm, ptep, ptl);
+                       goto retry;
+               }
+       }
+       spin_unlock(ptl);
+       return page;
+}
+
+#else /* !CONFIG_PPC_BOOK3S_64 */
+
 /*
  * We are holding mmap_sem, so a parallel huge page collapse cannot run.
  * To prevent hugepage split, disable irq.
@@ -672,6 +714,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
        BUG();
        return NULL;
 }
+#endif
 
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
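
A note on the address arithmetic in the new helper: hugepd_shift(hpd) gives the size of the huge mapping as a power of two, mask = (1UL << shift) - 1 isolates the offset of the faulting address within that mapping, and shifting that offset right by PAGE_SHIFT selects the matching base-size struct page within the contiguous huge page. The retry label covers the case where the entry is a hugetlb migration entry rather than a present PTE: the lock is dropped, __migration_entry_wait() blocks until migration completes, and the lookup starts over. Below is a standalone userspace sketch of the same offset arithmetic, with made-up example sizes (a 16MB huge page and 64KB base pages; the numbers are illustrative, not taken from the patch).

#include <stdio.h>

/* Example geometry only: 16MB huge pages on a 64KB base-page kernel. */
#define EX_PAGE_SHIFT	16
#define EX_HUGE_SHIFT	24

int main(void)
{
	unsigned long address = 0x10123456UL;	/* arbitrary address */
	unsigned long mask = (1UL << EX_HUGE_SHIFT) - 1;

	/* Byte offset of the address inside its huge page... */
	unsigned long offset = address & mask;
	/* ...turned into an index into the huge page's base pages. */
	unsigned long subpage = offset >> EX_PAGE_SHIFT;

	printf("offset in huge page: 0x%lx, subpage index: %lu\n",
	       offset, subpage);
	return 0;
}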