powerpc/32s: Split and inline flush_tlb_mm() and flush_tlb_page()
author    Christophe Leroy <christophe.leroy@csgroup.eu>
Thu, 22 Oct 2020 06:29:36 +0000 (06:29 +0000)
committer Michael Ellerman <mpe@ellerman.id.au>
Wed, 9 Dec 2020 05:46:55 +0000 (16:46 +1100)
flush_tlb_mm() and flush_tlb_page() currently handle both the
MMU_FTR_HPTE_TABLE case and the non-hash case.

The non-MMU_FTR_HPTE_TABLE case is trivial, as it is only a call
to _tlbie()/_tlbia(), which is not worth a dedicated function.

Make flush_tlb_mm() and flush_tlb_page() hash-specific and call
them from tlbflush.h based on mmu_has_feature(MMU_FTR_HPTE_TABLE).

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/11e932ded41ba6d9b251d89b7afa33cc060d3aa4.1603348103.git.christophe.leroy@csgroup.eu
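
A minimal standalone sketch of the resulting dispatch, assuming a userspace
mock in which a plain boolean stands in for mmu_has_feature(MMU_FTR_HPTE_TABLE);
the function names mirror the patch, but the bodies only print what the real
helpers would do:

/*
 * Standalone illustration (not kernel code): mimics the dispatch added to
 * tlbflush.h. When the hash-table feature is present the hash__ variant runs;
 * otherwise the flush collapses to a single _tlbia()/_tlbie().
 */
#include <stdbool.h>
#include <stdio.h>

static bool has_hpte_table;	/* stands in for mmu_has_feature(MMU_FTR_HPTE_TABLE) */

static void _tlbia(void)			{ puts("_tlbia(): flush whole TLB"); }
static void _tlbie(unsigned long va)		{ printf("_tlbie(0x%lx)\n", va); }
static void hash__flush_tlb_mm(void)		{ puts("hash__flush_tlb_mm()"); }
static void hash__flush_tlb_page(unsigned long va) { printf("hash__flush_tlb_page(0x%lx)\n", va); }

static inline void flush_tlb_mm(void)
{
	if (has_hpte_table)
		hash__flush_tlb_mm();
	else
		_tlbia();
}

static inline void flush_tlb_page(unsigned long vmaddr)
{
	if (has_hpte_table)
		hash__flush_tlb_page(vmaddr);
	else
		_tlbie(vmaddr);
}

int main(void)
{
	has_hpte_table = false;		/* CPU without a hash table */
	flush_tlb_mm();
	flush_tlb_page(0x10000000UL);

	has_hpte_table = true;		/* hash-MMU CPU */
	flush_tlb_mm();
	flush_tlb_page(0x10000000UL);
	return 0;
}

On CPUs without a hash table the wrappers thus reduce to a direct
_tlbia()/_tlbie() call that the compiler can inline at the call site.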
arch/powerpc/include/asm/book3s/32/tlbflush.h
arch/powerpc/mm/book3s32/tlb.c

arch/powerpc/include/asm/book3s/32/tlbflush.h
index f392a619138d717ae736a37eb1b56eb7ce5416f5..542765944531127caceb8d7eb12ae92374563739 100644
@@ -6,8 +6,8 @@
 /*
  * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
  */
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void hash__flush_tlb_mm(struct mm_struct *mm);
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
@@ -22,6 +22,22 @@ static inline void _tlbie(unsigned long address)
 #endif
 void _tlbia(void);
 
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               hash__flush_tlb_mm(mm);
+       else
+               _tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+       if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               hash__flush_tlb_page(vma, vmaddr);
+       else
+               _tlbie(vmaddr);
+}
+
 static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
 {
arch/powerpc/mm/book3s32/tlb.c
index ae5dbba9580543afba14ab9d659cec5a8f3874eb..65389bfe2eb8ed8f30b8101b30f72ee1f12ecddd 100644
@@ -118,15 +118,10 @@ EXPORT_SYMBOL(flush_tlb_kernel_range);
 /*
  * Flush all the (user) entries for the address space described by mm.
  */
-void flush_tlb_mm(struct mm_struct *mm)
+void hash__flush_tlb_mm(struct mm_struct *mm)
 {
        struct vm_area_struct *mp;
 
-       if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-               _tlbia();
-               return;
-       }
-
        /*
         * It is safe to go down the mm's list of vmas when called
         * from dup_mmap, holding mmap_lock.  It would also be safe from
@@ -136,23 +131,19 @@ void flush_tlb_mm(struct mm_struct *mm)
        for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 }
-EXPORT_SYMBOL(flush_tlb_mm);
+EXPORT_SYMBOL(hash__flush_tlb_mm);
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
        struct mm_struct *mm;
        pmd_t *pmd;
 
-       if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-               _tlbie(vmaddr);
-               return;
-       }
        mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
        pmd = pmd_off(mm, vmaddr);
        if (!pmd_none(*pmd))
                flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 }
-EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(hash__flush_tlb_page);
 
 /*
  * For each address in the range, find the pte for the address