arch/powerpc/mm/hash: validate the pte entries before handling the hash fault
Author: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Thu, 20 Sep 2018 18:09:45 +0000 (23:39 +0530)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Wed, 3 Oct 2018 05:39:59 +0000 (15:39 +1000)
Make sure we are operating on THP and hugetlb entries in the respective hash
fault handling routines.

No functional change in this patch. If we walked the table wrongly before, we
will retry the access.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/hugepage-hash64.c
arch/powerpc/mm/hugetlbpage-hash64.c

index 01f213d2bcb9b63693c96c5b5c0649eb919c438a..dfbc3b32f09b8760a83ce50413a1f7285e096b46 100644 (file)
@@ -51,6 +51,12 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                        new_pmd |= _PAGE_DIRTY;
        } while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
 
+       /*
+        * Make sure this is thp or devmap entry
+        */
+       if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
+               return 0;
+
        rflags = htab_convert_pte_flags(new_pmd);
 
 #if 0
index b320f5097a0616dce810c31e42fa42659475d4c3..2e6a8f9345d3708b908d76e5745d9ca0f9f1914f 100644 (file)
@@ -62,6 +62,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                        new_pte |= _PAGE_DIRTY;
        } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
 
+       /* Make sure this is a hugetlb entry */
+       if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
+               return 0;
+
        rflags = htab_convert_pte_flags(new_pte);
        if (unlikely(mmu_psize == MMU_PAGE_16G))
                offset = PTRS_PER_PUD;