1 #ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
2 #define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
5 * TLB flushing for 64-bit hash-MMU CPUs
8 #include <linux/percpu.h>
11 #define PPC64_TLB_BATCH_NR 192
13 struct ppc64_tlb_batch {
17 real_pte_t pte[PPC64_TLB_BATCH_NR];
18 unsigned long vpn[PPC64_TLB_BATCH_NR];
22 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
24 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
26 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
28 static inline void arch_enter_lazy_mmu_mode(void)
30 struct ppc64_tlb_batch *batch;
34 batch = this_cpu_ptr(&ppc64_tlb_batch);
38 static inline void arch_leave_lazy_mmu_mode(void)
40 struct ppc64_tlb_batch *batch;
44 batch = this_cpu_ptr(&ppc64_tlb_batch);
47 __flush_tlb_pending(batch);
51 #define arch_flush_lazy_mmu_mode() do {} while (0)
54 extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
55 int ssize, unsigned long flags);
56 extern void flush_hash_range(unsigned long number, int local);
57 extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
58 pmd_t *pmdp, unsigned int psize, int ssize,
/*
 * No-op on hash. NOTE(review): upstream keeps these hash__* hooks empty
 * (invalidation is driven by hash-PTE removal, not by these calls) —
 * empty body restored from upstream; confirm against the tree.
 */
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}
/* No-op on hash (see note on hash__local_flush_tlb_mm upstream); empty body restored. */
static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}
/*
 * No-op on hash; empty body and the dropped second parameter restored
 * from the upstream header — NOTE(review): verify parameter name.
 */
static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}
/*
 * No-op on hash; empty body and the dropped second parameter restored
 * from the upstream header — NOTE(review): verify parameter name.
 */
static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}
/*
 * No-op on hash; empty body and the dropped second parameter restored
 * from the upstream header — NOTE(review): verify parameter name.
 */
static inline void hash__flush_tlb_page_nohash(struct vm_area_struct *vma,
					       unsigned long vmaddr)
{
}
/* No-op on hash; empty body restored from the upstream header. */
static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}
/*
 * No-op on hash; empty body and the dropped second parameter restored
 * from the upstream header — NOTE(review): verify parameter name.
 */
static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}
95 extern void hash__tlb_flush(struct mmu_gather *tlb);
96 /* Private function for use by PCI IO mapping code */
97 extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
99 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
101 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */