Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
047ea784 PM |
2 | #ifndef __ASM_POWERPC_MMU_CONTEXT_H |
3 | #define __ASM_POWERPC_MMU_CONTEXT_H | |
88ced031 | 4 | #ifdef __KERNEL__ |
047ea784 | 5 | |
5e696617 BH |
6 | #include <linux/kernel.h> |
7 | #include <linux/mm.h> | |
8 | #include <linux/sched.h> | |
9 | #include <linux/spinlock.h> | |
80a7cc6c KG |
10 | #include <asm/mmu.h> |
11 | #include <asm/cputable.h> | |
5e696617 | 12 | #include <asm/cputhreads.h> |
80a7cc6c KG |
13 | |
14 | /* | |
5e696617 | 15 | * Most of the context management is out of line |
80a7cc6c | 16 | */ |
1da177e4 LT |
17 | extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
18 | extern void destroy_context(struct mm_struct *mm); | |
15b244a8 AK |
19 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
20 | struct mm_iommu_table_group_mem_t; | |
21 | ||
2e5bbb54 | 22 | extern int isolate_lru_page(struct page *page); /* from internal.h */ |
d7baee69 | 23 | extern bool mm_iommu_preregistered(struct mm_struct *mm); |
e0bf78b0 | 24 | extern long mm_iommu_new(struct mm_struct *mm, |
d7baee69 | 25 | unsigned long ua, unsigned long entries, |
15b244a8 | 26 | struct mm_iommu_table_group_mem_t **pmem); |
d7baee69 AK |
27 | extern long mm_iommu_put(struct mm_struct *mm, |
28 | struct mm_iommu_table_group_mem_t *mem); | |
88f54a35 AK |
29 | extern void mm_iommu_init(struct mm_struct *mm); |
30 | extern void mm_iommu_cleanup(struct mm_struct *mm); | |
d7baee69 AK |
31 | extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, |
32 | unsigned long ua, unsigned long size); | |
6b5c19c5 AK |
33 | extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( |
34 | struct mm_struct *mm, unsigned long ua, unsigned long size); | |
e0bf78b0 | 35 | extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, |
d7baee69 | 36 | unsigned long ua, unsigned long entries); |
15b244a8 | 37 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
76fa4975 | 38 | unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
6b5c19c5 | 39 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
76fa4975 | 40 | unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
425333bf | 41 | extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua); |
15b244a8 AK |
42 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); |
43 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); | |
44 | #endif | |
1da177e4 | 45 | extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); |
5e696617 | 46 | extern void set_context(unsigned long id, pgd_t *pgd); |
1da177e4 | 47 | |
6f0ef0f5 | 48 | #ifdef CONFIG_PPC_BOOK3S_64 |
7e381c0f | 49 | extern void radix__switch_mmu_context(struct mm_struct *prev, |
a25bd72b | 50 | struct mm_struct *next); |
d2adba3f AK |
/*
 * Hand the MMU over to @next on a task switch: radix has its own
 * dedicated context-switch routine, everything else goes through
 * the SLB switch path.
 */
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		radix__switch_mmu_context(prev, next);
	else
		switch_slb(tsk, next);
}
59 | ||
a336f2f5 | 60 | extern int hash__alloc_context_id(void); |
82228e36 | 61 | extern void hash__reserve_context_id(int id); |
e85a4710 | 62 | extern void __destroy_context(int context_id); |
/* Nothing to do here; the non-Book3S-64 configuration uses an out-of-line version. */
static inline void mmu_context_init(void) { }
f384796c AK |
64 | |
65 | static inline int alloc_extended_context(struct mm_struct *mm, | |
66 | unsigned long ea) | |
67 | { | |
68 | int context_id; | |
69 | ||
70 | int index = ea >> MAX_EA_BITS_PER_CONTEXT; | |
71 | ||
72 | context_id = hash__alloc_context_id(); | |
73 | if (context_id < 0) | |
74 | return context_id; | |
75 | ||
76 | VM_WARN_ON(mm->context.extended_id[index]); | |
77 | mm->context.extended_id[index] = context_id; | |
78 | return context_id; | |
79 | } | |
80 | ||
81 | static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) | |
82 | { | |
83 | int context_id; | |
84 | ||
c9f80734 | 85 | context_id = get_user_context(&mm->context, ea); |
f384796c AK |
86 | if (!context_id) |
87 | return true; | |
88 | return false; | |
89 | } | |
90 | ||
6f0ef0f5 | 91 | #else |
d2adba3f AK |
92 | extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, |
93 | struct task_struct *tsk); | |
c83ec269 AG |
94 | extern unsigned long __init_new_context(void); |
95 | extern void __destroy_context(unsigned long context_id); | |
6f0ef0f5 | 96 | extern void mmu_context_init(void); |
f384796c AK |
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* Extended contexts exist only on Book3S-64; this must never be called. */
	WARN_ON(1);
	return -ENOMEM;
}
104 | ||
/* Without Book3S-64 extended contexts, an extra context is never needed. */
static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
6f0ef0f5 BH |
109 | #endif |
110 | ||
a25bd72b BH |
111 | #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU) |
112 | extern void radix_kvm_prefetch_workaround(struct mm_struct *mm); | |
113 | #else | |
114 | static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { } | |
115 | #endif | |
116 | ||
851d2e2f THFL |
117 | extern void switch_cop(struct mm_struct *next); |
118 | extern int use_cop(unsigned long acop, struct mm_struct *mm); | |
119 | extern void drop_cop(unsigned long acop, struct mm_struct *mm); | |
120 | ||
03b8abed FB |
121 | #ifdef CONFIG_PPC_BOOK3S_64 |
/* One more CPU (or copro standing in for one, see below) is using this mm. */
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}
126 | ||
/* One fewer CPU is using this mm; counterpart of inc_mm_active_cpus(). */
static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}
131 | ||
/* Account a coprocessor attaching to this mm. */
static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global as to
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}
142 | ||
/* Account a coprocessor detaching from this mm; real work happens on radix only. */
static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
176 | #else | |
/* !CONFIG_PPC_BOOK3S_64: no copro/active-CPU accounting to maintain. */
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
181 | #endif | |
182 | ||
183 | ||
3a2df379 BH |
184 | extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, |
185 | struct task_struct *tsk); | |
1da177e4 | 186 | |
9765ad13 DG |
/* Interrupt-safe wrapper: disables local IRQs around switch_mm_irqs_off(). */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
196 | #define switch_mm_irqs_off switch_mm_irqs_off | |
197 | ||
198 | ||
1da177e4 LT |
199 | #define deactivate_mm(tsk,mm) do { } while (0) |
200 | ||
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* A full switch_mm() on behalf of the current task does the job. */
	switch_mm(prev, next, current);
}
209 | ||
5e696617 BH |
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	/* NOTE(review): presumably cleared so a lazy-tlb task cannot use a stale user PGD — confirm */
	get_paca()->pgd = NULL;
#endif
}
219 | ||
32ea4c14 | 220 | #ifdef CONFIG_PPC_BOOK3E_64 |
83d3f0e9 LD |
/* 64-bit Book3E: no address-space teardown work is needed here. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
30b49ec7 NP |
224 | #else |
225 | extern void arch_exit_mmap(struct mm_struct *mm); | |
226 | #endif | |
83d3f0e9 LD |
227 | |
228 | static inline void arch_unmap(struct mm_struct *mm, | |
229 | struct vm_area_struct *vma, | |
230 | unsigned long start, unsigned long end) | |
231 | { | |
232 | if (start <= mm->context.vdso_base && mm->context.vdso_base < end) | |
233 | mm->context.vdso_base = 0; | |
234 | } | |
235 | ||
/* Hook called during exec() mm setup; nothing to do on powerpc. */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
240 | ||
1137573a RP |
241 | #ifdef CONFIG_PPC_MEM_KEYS |
242 | bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write, | |
243 | bool execute, bool foreign); | |
2cd4bd19 | 244 | void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm); |
1137573a | 245 | #else /* CONFIG_PPC_MEM_KEYS */ |
/* Without memory protection keys there is nothing to veto. */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
92e3da3c | 252 | |
4fb158f6 | 253 | #define pkey_mm_init(mm) |
06bb53b3 RP |
254 | #define thread_pkey_regs_save(thread) |
255 | #define thread_pkey_regs_restore(new_thread, old_thread) | |
256 | #define thread_pkey_regs_init(thread) | |
2cd4bd19 | 257 | #define arch_dup_pkeys(oldmm, mm) |
87bbabbe | 258 | |
a6590ca5 RP |
/* No pkeys configured: no pkey bits to fold into the HPTE flags. */
static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}
263 | ||
4fb158f6 RP |
264 | #endif /* CONFIG_PPC_MEM_KEYS */ |
265 | ||
2cd4bd19 RP |
/* fork-time hook: copy pkey state into the new mm; always succeeds. */
static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}
272 | ||
88ced031 | 273 | #endif /* __KERNEL__ */ |
047ea784 | 274 | #endif /* __ASM_POWERPC_MMU_CONTEXT_H */ |