/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>
#include <linux/pkeys.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/gcs.h>
#include <asm/proc-fns.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

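/*
 * When CONFIG_PID_IN_CONTEXTIDR is enabled, publish the incoming task's PID
 * in CONTEXTIDR_EL1 so that external debuggers and trace tools can identify
 * the thread currently running on this CPU.
 */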
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0_nosync(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
}

static inline void cpu_set_reserved_ttbr0(void)
{
	cpu_set_reserved_ttbr0_nosync();
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

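/*
 * Install a user pgd in TTBR0_EL1. swapper_pg_dir is reachable only via
 * TTBR1_EL1 and must never be installed here.
 */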
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active.
 */
#define idmap_t0sz	TCR_T0SZ(IDMAP_VA_BITS)

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

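/*
 * Install the idmap in TTBR0_EL1. As above, the reserved page tables are
 * installed first so that the TLBs can be invalidated and T0SZ changed
 * without risking speculative fetches via stale entries.
 */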
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Load our new page tables. A strict BBM approach requires that we ensure that
 * TLBs are free of any entries that may overlap with the global mappings we are
 * about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	__cpu_set_tcr_t0sz(t0sz);

	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
}

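/*
 * Replace the live TTBR1_EL1 page tables with pgdp, optionally setting the
 * CNP (Common-Not-Private) bit in the new TTBR value.
 */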
void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp);

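/*
 * Re-install swapper_pg_dir with CNP enabled, once this CPU is known to
 * support Common-Not-Private translations.
 */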
static inline void cpu_enable_swapper_cnp(void)
{
	__cpu_replace_ttbr1(lm_alias(swapper_pg_dir), true);
}

static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
	/*
	 * Only for early TTBR1 replacement before cpucaps are finalized and
	 * before we've decided whether to use CNP.
	 */
	WARN_ON(system_capabilities_finalized());
	__cpu_replace_ttbr1(pgdp, false);
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

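/*
 * Initialise the context of a new mm. An id of 0 means "no ASID allocated
 * yet"; a real ASID is assigned lazily on the first switch to this mm in
 * check_and_switch_context().
 */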
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);

	/* pkey 0 is the default, so always reserve it. */
	mm->context.pkey_allocation_map = BIT(0);

	return 0;
}

static inline void arch_dup_pkeys(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	/* Duplicate the oldmm pkey state in mm: */
	mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
}

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);

	return 0;
}

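/*
 * No-op hooks: ASIDs are deliberately not released at mm teardown, see the
 * comment above check_and_switch_context().
 */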
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
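/*
 * With software TTBR0 PAN, the kernel runs with the reserved page tables in
 * TTBR0_EL1 and stashes the task's real TTBR0 value (pgd plus ASID) in
 * thread_info, to be restored around user access routines.
 */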
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

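/*
 * On systems where only some CPUs implement 32-bit EL0, restrict the
 * affinity of compat tasks to the subset of CPUs that can run them.
 */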
static inline const struct cpumask *
__task_cpu_possible_mask(struct task_struct *p, const struct cpumask *mask)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return mask;

	if (!is_compat_thread(task_thread_info(p)))
		return mask;

	return system_32bit_el0_cpumask();
}

static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	return __task_cpu_possible_mask(p, cpu_possible_mask);
}
#define task_cpu_possible_mask task_cpu_possible_mask

const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

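/*
 * Pin (and later release) the ASID of an mm so that it survives rollover
 * events, e.g. while the address space is shared with a device.
 */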
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

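/*
 * With Top Byte Ignore, bits 63:56 of a tagged user address are not used in
 * translation, so the untag mask clears the top byte.
 */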
#define mm_untag_mask mm_untag_mask
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return -1UL >> 8;
}

/*
 * Only enforce protection keys on the current process, because there is no
 * user context to access POR_EL0 for another address space.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	if (!system_supports_poe())
		return true;

	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;

	return por_el0_allows_pkey(vma_pkey(vma), write, execute);
}

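/*
 * Called when a task stops using an mm (e.g. on exec); release the task's
 * Guarded Control Stack.
 */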
#define deactivate_mm deactivate_mm
static inline void deactivate_mm(struct task_struct *tsk,
				 struct mm_struct *mm)
{
	gcs_free(tsk);
}

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */