/*
 * arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
 * (blame view captured after commit "powerpc/mm/book3s/hash: Rename
 * flush_tlb_pmd_range", from the linux-2.6-block.git tree)
 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

/*
 * Per-CPU batch of hash-page-table invalidations queued while in lazy
 * MMU mode and flushed in one go by __flush_tlb_pending().
 */
struct ppc64_tlb_batch {
	int active;		/* non-zero between arch_enter/leave_lazy_mmu_mode() */
	unsigned long index;	/* number of entries currently queued (see arch_leave_lazy_mmu_mode()) */
	struct mm_struct *mm;	/* presumably the address space the queued entries belong to — confirm against batch fill sites */
	real_pte_t pte[PPC64_TLB_BATCH_NR];	/* queued PTE values */
	unsigned long vpn[PPC64_TLB_BATCH_NR];	/* queued virtual page numbers */
	unsigned int psize;	/* NOTE(review): presumably the base page size of the queued entries */
	int ssize;		/* NOTE(review): presumably the segment size — confirm */
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
24
25extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
26
27#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
28
29static inline void arch_enter_lazy_mmu_mode(void)
30{
1a472c9d 31 struct ppc64_tlb_batch *batch;
ee3b93eb 32
1a472c9d
AK
33 if (radix_enabled())
34 return;
35 batch = this_cpu_ptr(&ppc64_tlb_batch);
ee3b93eb
AK
36 batch->active = 1;
37}
38
39static inline void arch_leave_lazy_mmu_mode(void)
40{
1a472c9d
AK
41 struct ppc64_tlb_batch *batch;
42
43 if (radix_enabled())
44 return;
45 batch = this_cpu_ptr(&ppc64_tlb_batch);
ee3b93eb
AK
46
47 if (batch->index)
48 __flush_tlb_pending(batch);
49 batch->active = 0;
50}
51
#define arch_flush_lazy_mmu_mode() do {} while (0)
53
/* NOTE(review): presumably flushes all local TLB entries via tlbiel — confirm in hash_native.c */
extern void hash__tlbiel_all(unsigned int action);

/* Invalidate the hash-table/TLB entry for a single page. */
extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
			    int ssize, unsigned long flags);
/* Flush 'number' queued entries; 'local' presumably selects a local-only flush. */
extern void flush_hash_range(unsigned long number, int local);
/* Hugepage counterpart of flush_hash_page(). */
extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
				pmd_t *pmdp, unsigned int psize, int ssize,
				unsigned long flags);
/*
 * No-op on hash: per-mm invalidation happens through the hash page
 * table (flush_hash_page() and friends), not a TLB flush here —
 * rationale inferred from the hash__flush_all_mm() comment below;
 * confirm against callers.
 */
static inline void hash__local_flush_tlb_mm(struct mm_struct *mm)
{
}
65
/* No-op on hash; see the sibling hash__flush_all_mm() comment for why. */
static inline void hash__flush_tlb_mm(struct mm_struct *mm)
{
}
69
6110236b
FB
70static inline void hash__local_flush_all_mm(struct mm_struct *mm)
71{
72 /*
73 * There's no Page Walk Cache for hash, so what is needed is
74 * the same as flush_tlb_mm(), which doesn't really make sense
75 * with hash. So the only thing we could do is flush the
76 * entire LPID! Punt for now, as it's not being used.
77 */
78 WARN_ON_ONCE(1);
79}
80
static inline void hash__flush_all_mm(struct mm_struct *mm)
{
	/*
	 * There's no Page Walk Cache for hash, so what is needed is
	 * the same as flush_tlb_mm(), which doesn't really make sense
	 * with hash. So the only thing we could do is flush the
	 * entire LPID! Punt for now, as it's not being used.
	 */
	WARN_ON_ONCE(1);
}
91
676012a6
AK
/* No-op on hash: single-page invalidation goes via the hash table instead. */
static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
}
96
676012a6
AK
/* No-op on hash; see hash__local_flush_tlb_page(). */
static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}
101
676012a6
AK
/* No-op on hash: range invalidation is handled through the hash table. */
static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}
106
676012a6
AK
/* No-op on hash: kernel-range invalidation is handled through the hash table. */
static inline void hash__flush_tlb_kernel_range(unsigned long start,
						unsigned long end)
{
}
111
676012a6
AK

struct mmu_gather;
/* Final flush hook for an mmu_gather on hash — presumably drains pending
 * hash-table invalidations; confirm in mm/book3s64/hash_tlb.c. */
extern void hash__tlb_flush(struct mmu_gather *tlb);

#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
/* Flush hash-table entries covering one PMD; hash-MMU builds only
 * (no stub in the #else branch, so callers must be hash-guarded). */
void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
#else
/* Without the hash MMU there is no hash table to flush — empty stub. */
static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
#endif
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */