arch/powerpc/mm/hugetlb: NestMMU workaround for hugetlb mprotect RW upgrade
arch/powerpc/include/asm/book3s/64/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to the tlbiel_all() implementations. */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};

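/*
 * Illustrative only: the scope values above are consumed by the
 * implementation helpers called below, e.g.
 *
 *	radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);	// all TLBs
 *	radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);	// current LPID only
 */
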
#ifdef CONFIG_PPC_NATIVE
static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled() (and the implementations use
	 * early_cpu_has_feature() etc.) because those work early in boot,
	 * and this is the machine check path, which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
#else
static inline void tlbiel_all(void) { BUG(); }
#endif
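
/*
 * Note: tlbiel is the core-local form of tlbie, so tlbiel_all() only
 * flushes the TLB of the CPU it runs on; nothing is broadcast to other
 * cores, which is what the machine check path wants.
 */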

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

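/*
 * Note: hash has no PMD-granular flush primitive, so on hash both
 * flush_pmd_tlb_range() above and flush_hugetlb_tlb_range() below fall
 * back to the generic hash range flush.
 */
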
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

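/*
 * Context for the commit above: the NestMMU workaround for a hugetlb
 * mprotect RO->RW upgrade must invalidate the old translation before the
 * new PTE becomes visible. A sketch of the ordering (illustrative only;
 * the helper names belong to the hugetlb code, not this header):
 *
 *	old_pte = huge_ptep_get_and_clear(mm, addr, ptep); // clear first
 *	flush_hugetlb_tlb_range(vma, start, end);          // flush Nest MMU
 *	set_huge_pte_at(mm, addr, ptep, new_pte);          // then upgrade
 */
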
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}

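/*
 * The *_all_mm() variants flush every cached translation for the mm; on
 * radix that includes the page walk cache, not just the TLB proper. They
 * exist for contexts (e.g. coprocessors sharing the address space) where
 * a partial flush is not sufficient.
 */
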
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

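/*
 * tlb_flush() is the arch hook for the generic mmu_gather machinery: the
 * core code batches page-table tear-downs and calls this once per batch
 * (see include/asm-generic/tlb.h).
 */
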
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/* See ptep_set_access_flags comment */
	if (atomic_read(&vma->vm_mm->context.copros) > 0)
		flush_tlb_page(vma, address);
}

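/*
 * Why only when coprocessors are attached: the CPU recovers from a
 * spurious fault after a permission upgrade by simply refetching the PTE,
 * but the Nest MMU used by coprocessors does not, so its stale
 * translation has to be flushed explicitly (see the
 * ptep_set_access_flags comment referenced above).
 */
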
/*
 * Flush the page walk cache for the address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. By this
	 * point the upper/higher level page table entry has already been
	 * marked none, so it is safe to flush the PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
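
/*
 * Illustrative ordering, per the comment above (sketch; the real call
 * sites live in the page-table freeing paths, and the names here are
 * placeholders):
 *
 *	pmd_clear(pmd);				// upper level entry -> none
 *	flush_tlb_pgtable(tlb, address);	// now the PWC can be flushed
 */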
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */