/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
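
/*
 * Sentinel context id, used to indicate that no valid hardware MMU
 * context is associated with a CPU or mm.
 */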
#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};
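
/*
 * tlbiel_all() invalidates all translations cached by the local CPU
 * (tlbiel is the local, non-broadcast form of the invalidation
 * instruction), walking every TLB set.
 */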
static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled and implementations use
	 * early_cpu_has_feature etc because that works early in boot
	 * and this is the machine check path which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}
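
/*
 * The range flush helpers below only act on radix. With the hash MMU,
 * translations are invalidated when the hash page table entries are
 * updated (via pte_update() and the flush batching in tlbflush-hash.h),
 * so these helpers have nothing left to do.
 */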

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_pmd_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		radix__flush_hugetlb_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
}
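
/*
 * The local_ variants invalidate translations on the calling CPU only.
 */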
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_tlb_page_psize(struct mm_struct *mm,
					      unsigned long vmaddr, int psize)
{
	if (radix_enabled())
		radix__local_flush_tlb_page_psize(mm, vmaddr, psize);
}
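
/*
 * tlb_flush() is the arch hook called by the generic mmu_gather code
 * once a batch of unmaps is complete.
 */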
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}
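
/*
 * On SMP the flush must reach every CPU that may have cached the
 * translation; on UP the local flush suffices.
 */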
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}
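
/*
 * Decide whether changing a PTE from oldval to newval requires a TLB
 * flush on radix, or whether the MMU's PTE re-fetch behaviour (see
 * flush_tlb_fix_spurious_fault() above) makes the stale entry harmless.
 */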
static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash,
	 * ptep_modify_prot_start() does a pte_update() which does or schedules
	 * any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings or non-PTEs or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated; in
	 * practice those should rarely if ever matter.
	 */
	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}
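
/*
 * pte_needs_flush()/huge_pmd_needs_flush() let change_protection() skip
 * the TLB flush when a PTE change is a pure upgrade. For example,
 * clearing _PAGE_WRITE from oldval requires a flush, while merely
 * setting _PAGE_DIRTY or _PAGE_ACCESSED does not.
 */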
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush
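
/*
 * tlbie_capable: the platform supports the global (broadcast) tlbie
 * instruction. tlbie_enabled: its use has not been disabled at runtime
 * (e.g. via debugfs); when false, flushes fall back to tlbiel plus IPIs.
 */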
extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */