powerpc/mm: update pmdp_invalidate to return old pmd value
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
          Thu, 1 Feb 2018 00:18:02 +0000 (16:18 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 1 Feb 2018 01:18:37 +0000 (17:18 -0800)
Make pmdp_invalidate() return the old pmd entry. This is required to avoid losing dirty and accessed bits: pmd_hugepage_update() already captures the old value atomically while clearing _PAGE_PRESENT, so callers can read those bits from the returned entry instead of re-reading a pmd that hardware may have updated in the meantime.
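
For illustration, here is a minimal caller-side sketch of the pattern the new
return value enables (hypothetical, simplified code; not a hunk from this
series, though pmd_dirty()/pmd_young() are the usual kernel accessors):

	static void split_huge_pmd_sketch(struct vm_area_struct *vma,
					  unsigned long haddr, pmd_t *pmdp)
	{
		pmd_t old_pmd;
		bool dirty, young;

		/* Clear the present bit and capture the old entry in one go. */
		old_pmd = pmdp_invalidate(vma, haddr, pmdp);

		/*
		 * Read dirty/accessed from the returned value: hardware may
		 * set these bits at any point up to the invalidation, so a
		 * separate read of *pmdp could miss them.
		 */
		dirty = pmd_dirty(old_pmd);
		young = pmd_young(old_pmd);

		/* ... transfer dirty/young to the replacement entries ... */
	}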

Link: http://lkml.kernel.org/r/20171213105756.69879-7-kirill.shutemov@linux.intel.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/mm/pgtable-book3s64.c

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 44697817ccc6ddc13406dc30388d06d7e8795335..ee19d5bbee06adac1edd3fd41c8bc74cbe68e36b 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1137,8 +1137,8 @@ static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
 }
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-                           pmd_t *pmdp);
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                            pmd_t *pmdp);
 
 #define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
 static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 3b65917785a5789a771e6befc1c09d4c03d876de..422e80253a3336380a4a42b3c72783ff447c3077 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -90,16 +90,19 @@ void serialize_against_pte_lookup(struct mm_struct *mm)
  * We use this to invalidate a pmdp entry before switching from a
  * hugepte to regular pmd entry.
  */
-void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-                    pmd_t *pmdp)
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                     pmd_t *pmdp)
 {
-       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+       unsigned long old_pmd;
+
+       old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that rely on IRQ disabling
         * to prevent a parallel THP split work as expected.
         */
        serialize_against_pte_lookup(vma->vm_mm);
+       return __pmd(old_pmd);
 }
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
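
The powerpc side relies on pmd_hugepage_update() clearing _PAGE_PRESENT and
handing back the previous entry in a single atomic read-modify-write. A
standalone sketch of that semantic in portable C11 (illustrative only; the
kernel uses its own atomic primitives here, not stdatomic.h, and the bit
position below is made up):

	#include <stdatomic.h>
	#include <stdint.h>

	#define SKETCH_PRESENT (1ULL << 0)	/* stand-in for _PAGE_PRESENT */

	/*
	 * Clear the requested bits in one atomic step and return the value
	 * the entry held beforehand, so a concurrent hardware update of the
	 * dirty/accessed bits cannot slip between the read and the clear.
	 */
	static uint64_t hugepage_update_sketch(_Atomic uint64_t *entry,
					       uint64_t clr)
	{
		return atomic_fetch_and(entry, ~clr);
	}

With that in place, the __pmd(old_pmd) in the patch merely rewraps the raw
value captured at invalidation time, so any dirty or accessed bit set before
the present bit went away travels with the return value.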