powerpc/nohash: Refactor __ptep_set_access_flags()
author Christophe Leroy <christophe.leroy@csgroup.eu>
Mon, 25 Sep 2023 18:31:33 +0000 (20:31 +0200)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 19 Oct 2023 06:12:45 +0000 (17:12 +1100)
The nohash/32 version of __ptep_set_access_flags() does the same as
the nohash/64 version; the only difference is that the nohash/32
version is more complete and uses pte_update().

Make it common and remove the nohash/64 version.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/e296885df46289d3e5f4cb51efeefe593f76ef24.1695659959.git.christophe.leroy@csgroup.eu
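
For orientation only (not part of the patch): below is a minimal userspace
sketch of the behaviour being consolidated. The retained nohash/32 logic ORs
the dirty/accessed/RW/exec bits of the new entry into the PTE atomically
through pte_update(), whereas the removed nohash/64 variant did a plain
read-modify-write. The bit values, types and the pte_update_sketch()/
set_access_flags_sketch() helpers here are illustrative stand-ins, not the
real powerpc definitions.

/*
 * Userspace sketch only -- illustrative bit values and helper names,
 * not the real powerpc definitions.  It models what the kept helper
 * does: atomically OR the dirty/accessed/RW/exec bits of the new
 * entry into the existing PTE (as pte_update() does), instead of the
 * plain read-modify-write the removed nohash/64 variant used.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_ACCESSED  0x1UL           /* illustrative values only */
#define _PAGE_DIRTY     0x2UL
#define _PAGE_RW        0x4UL
#define _PAGE_EXEC      0x8UL

typedef _Atomic uint64_t pte_t;

/* Models pte_update(): atomically compute (*ptep & ~clr) | set, return old value */
static uint64_t pte_update_sketch(pte_t *ptep, uint64_t clr, uint64_t set)
{
        uint64_t old = atomic_load(ptep);

        while (!atomic_compare_exchange_weak(ptep, &old, (old & ~clr) | set))
                ;
        return old;
}

static void set_access_flags_sketch(pte_t *ptep, uint64_t entry)
{
        uint64_t set = entry & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        pte_update_sketch(ptep, 0, set);
        /* the real helper then calls flush_tlb_page(vma, address) */
}

int main(void)
{
        pte_t pte = _PAGE_RW;   /* pretend PTE: writable, not yet accessed or dirtied */

        set_access_flags_sketch(&pte, _PAGE_ACCESSED | _PAGE_DIRTY);
        printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));  /* prints 0x7 */
        return 0;
}

Built with any C11 compiler, it prints pte = 0x7: accessed and dirty ORed
into the original RW-only value, with no window in which another updater's
bits could be lost.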
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/include/asm/nohash/pgtable.h

diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 481594097f460af74faed597098871b6a5f6c655..9164a9e41b020693c3607338d26a0717f3fcac75 100644
@@ -161,22 +161,6 @@ static inline void pmd_clear(pmd_t *pmdp)
        *pmdp = __pmd(0);
 }
 
-#ifndef __ptep_set_access_flags
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
-                                          pte_t *ptep, pte_t entry,
-                                          unsigned long address,
-                                          int psize)
-{
-       unsigned long set = pte_val(entry) &
-                           (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-       int huge = psize > mmu_virtual_psize ? 1 : 0;
-
-       pte_update(vma->vm_mm, address, ptep, 0, set, huge);
-
-       flush_tlb_page(vma, address);
-}
-#endif
-
 /*
  * Note that on Book E processors, the pmd contains the kernel virtual
  * (lowmem) address of the pte page.  The physical address is less useful
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index b59fbf754f820f810dd3e71759b6163919f7e1b5..36b9bad428ccc5cc6f518f9752933d432a25a434 100644
@@ -159,21 +159,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
        __young;                                                        \
 })
 
-/* Set the dirty and/or accessed bits atomically in a linux PTE */
-static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
-                                          pte_t *ptep, pte_t entry,
-                                          unsigned long address,
-                                          int psize)
-{
-       unsigned long bits = pte_val(entry) &
-               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-
-       unsigned long old = pte_val(*ptep);
-       *ptep = __pte(old | bits);
-
-       flush_tlb_page(vma, address);
-}
-
 #define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index b974d1ab8a9763bae942ec497cfdc46e129816fc..c5f21eda2a43bd122fbbcf924b06aefa054c0a64 100644
@@ -101,6 +101,23 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
        pte_update(mm, addr, ptep, ~0UL, 0, 0);
 }
 
+/* Set the dirty and/or accessed bits atomically in a linux PTE */
+#ifndef __ptep_set_access_flags
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
+                                          pte_t *ptep, pte_t entry,
+                                          unsigned long address,
+                                          int psize)
+{
+       unsigned long set = pte_val(entry) &
+                           (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+       int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+       pte_update(vma->vm_mm, address, ptep, 0, set, huge);
+
+       flush_tlb_page(vma, address);
+}
+#endif
+
 /* Generic accessors to PTE bits */
 #ifndef pte_mkwrite_novma
 static inline pte_t pte_mkwrite_novma(pte_t pte)