powerpc/64s: Fix page table fragment refcount race vs speculative references
author Nicholas Piggin <npiggin@gmail.com>
Fri, 27 Jul 2018 11:48:17 +0000 (21:48 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 7 Aug 2018 14:32:32 +0000 (00:32 +1000)
The page table fragment allocator uses the main page refcount racily
with respect to speculative references. A customer observed a BUG due
to page table page refcount underflow in the fragment allocator. This
can be caused by the fragment allocator's set_page_count() stomping on
a speculative reference; the speculative-failure handler then drops
the new reference, and the underflow eventually pops when the page
tables are freed.
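
As a rough illustration, a minimal user-space model of that interleaving
(C11 atomics standing in for the page refcount helpers; this is a sketch
of the ordering, not the kernel code itself):

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int refcount;             /* models the page's main refcount */

  int main(void)
  {
          atomic_store(&refcount, 1);     /* PTE page freshly allocated       */

          atomic_fetch_add(&refcount, 1); /* B: speculative reference taken   */
          atomic_store(&refcount, 16);    /* A: set_page_count(page, PTE_FRAG_NR),
                                           *    here 16, silently overwrites
                                           *    B's reference                 */
          atomic_fetch_sub(&refcount, 1); /* B: speculative lookup fails,
                                           *    put_page() drops it again     */

          /* A handed out 16 fragment references but only 15 remain, so the
           * final fragment free will eventually underflow the count. */
          printf("count = %d, expected 16\n", atomic_load(&refcount));
          return 0;
  }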

Fix this by using a dedicated field in the struct page for the page
table fragment allocator.

Fixes: 5c1f6ee9a31c ("powerpc: Reduce PTE table memory wastage")
Cc: stable@vger.kernel.org # v3.10+
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/mm/pgtable-book3s64.c
include/linux/mm_types.h

diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 8b24168ea8c436cbea406186138b55c0bbd45d8d..4a892d894a0f1b2d96ed319ab0f4a622104c7526 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -200,9 +200,9 @@ static void pte_frag_destroy(void *pte_frag)
        /* drop all the pending references */
        count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
        /* We allow PTE_FRAG_NR fragments from a PTE page */
-       if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
+       if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
                pgtable_page_dtor(page);
-               free_unref_page(page);
+               __free_page(page);
        }
 }
 
@@ -215,9 +215,9 @@ static void pmd_frag_destroy(void *pmd_frag)
        /* drop all the pending references */
        count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
        /* We allow PMD_FRAG_NR fragments from a PMD page */
-       if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) {
+       if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
-               free_unref_page(page);
+               __free_page(page);
        }
 }
 
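To make the "pending references" computation in the two destroy paths above
concrete (illustrative numbers only; the real PTE_FRAG_SIZE and PTE_FRAG_NR
depend on the page size configuration): with a 64K page split into
PTE_FRAG_NR = 16 fragments of 4K each, and mm->context.pte_frag pointing
0xc000 bytes into the page,

        count = 0xc000 >> PTE_FRAG_SIZE_SHIFT;  /* = 12 fragments handed out */
        atomic_sub_and_test(16 - 12,            /* drop the 4 fragments that */
                            &page->pt_frag_refcount); /* were never handed out */

Each fragment that was handed out keeps its own reference, which is dropped
through pte_fragment_free() (pmd_fragment_free() for the PMD case) when that
page table is freed.
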
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 4afbfbb64bfd0a21254a177f4fa3df3c37bff6ea..78d0b3d5ebad1631b7349da988e8e0b3809485ad 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -270,6 +270,8 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
                return NULL;
        }
 
+       atomic_set(&page->pt_frag_refcount, 1);
+
        ret = page_address(page);
        /*
         * if we support only one fragment just return the
@@ -285,7 +287,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
         * count.
         */
        if (likely(!mm->context.pmd_frag)) {
-               set_page_count(page, PMD_FRAG_NR);
+               atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
                mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);
@@ -308,9 +310,10 @@ void pmd_fragment_free(unsigned long *pmd)
 {
        struct page *page = virt_to_page(pmd);
 
-       if (put_page_testzero(page)) {
+       BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+       if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
-               free_unref_page(page);
+               __free_page(page);
        }
 }
 
@@ -352,6 +355,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
                        return NULL;
        }
 
+       atomic_set(&page->pt_frag_refcount, 1);
 
        ret = page_address(page);
        /*
@@ -367,7 +371,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
         * count.
         */
        if (likely(!mm->context.pte_frag)) {
-               set_page_count(page, PTE_FRAG_NR);
+               atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);
@@ -390,10 +394,11 @@ void pte_fragment_free(unsigned long *table, int kernel)
 {
        struct page *page = virt_to_page(table);
 
-       if (put_page_testzero(page)) {
+       BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+       if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                if (!kernel)
                        pgtable_page_dtor(page);
-               free_unref_page(page);
+               __free_page(page);
        }
 }
 
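Taken together, these hunks leave the page's normal refcount at the 1 it got
from allocation (so a speculative get/put pair on it is now harmless) and
move all fragment accounting to pt_frag_refcount, with the final
__free_page() dropping the page's remaining reference. A rough user-space
model of the resulting lifecycle (stand-in types and values, not kernel API):

  #include <assert.h>
  #include <stdatomic.h>
  #include <stdio.h>

  #define PMD_FRAG_NR 4                          /* illustrative value */

  struct pt_page {
          atomic_int refcount;                   /* the page's normal refcount */
          atomic_int pt_frag_refcount;           /* new: outstanding fragments */
  };

  static void frag_free(struct pt_page *p)
  {
          assert(atomic_load(&p->pt_frag_refcount) > 0); /* mirrors the BUG_ON() */
          if (atomic_fetch_sub(&p->pt_frag_refcount, 1) == 1) {
                  /* last fragment gone: __free_page() drops the normal
                   * refcount from 1 to 0 and returns the page */
                  atomic_fetch_sub(&p->refcount, 1);
                  printf("page freed\n");
          }
  }

  int main(void)
  {
          struct pt_page page;

          atomic_init(&page.refcount, 1);        /* from the page allocator   */
          atomic_init(&page.pt_frag_refcount, PMD_FRAG_NR); /* carve up page  */

          atomic_fetch_add(&page.refcount, 1);   /* speculative reference...  */
          atomic_fetch_sub(&page.refcount, 1);   /* ...comes and goes, no harm */

          for (int i = 0; i < PMD_FRAG_NR; i++)
                  frag_free(&page);              /* prints "page freed" once  */
          return 0;
  }
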
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb4fb14af6897d2804b671eb0ff4e2..22651e124071f3cf602396766ae77685266a5582 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -139,7 +139,10 @@ struct page {
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
                        unsigned long _pt_pad_2;        /* mapping */
-                       struct mm_struct *pt_mm;        /* x86 pgds only */
+                       union {
+                               struct mm_struct *pt_mm; /* x86 pgds only */
+                               atomic_t pt_frag_refcount; /* powerpc */
+                       };
 #if ALLOC_SPLIT_PTLOCKS
                        spinlock_t *ptl;
 #else