/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

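/*
 * Whether the linear map is created at page granularity (assumed to
 * correspond to the "rodata=full" command-line setting), allowing
 * permissions on linear aliases of kernel mappings to be restricted.
 */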
extern bool rodata_full;

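/*
 * With CONFIG_PID_IN_CONTEXTIDR, mirror the scheduled-in task's PID into
 * CONTEXTIDR_EL1 so that external debug/trace agents can identify the
 * running task; otherwise this is a no-op.
 */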
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

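/*
 * Install @mm's pgd in TTBR0_EL1. Parking TTBR0 on the reserved (zero)
 * tables first means the TLB can never observe a partially updated
 * ASID/base address pair. swapper_pg_dir carries kernel mappings and must
 * only ever live in TTBR1_EL1, hence the BUG_ON().
 */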
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

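/*
 * True if the idmap had to cover a wider VA range than TCR_T0SZ(VA_BITS)
 * allows (i.e. idmap_t0sz was reduced). With 52-bit VAs the default range
 * already covers any supported RAM placement, so no extension is needed.
 */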
static inline bool __cpu_uses_extended_idmap(void)
{
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
		return false;

	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

/*
 * True if the extended ID map requires an extra level of translation table
 * to be configured.
 */
static inline bool __cpu_uses_extended_idmap_level(void)
{
	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}

/*
 * Set TCR.T0SZ to the given value; a no-op unless the extended idmap is
 * in use.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

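/*
 * For reference: TCR_T0SZ(x) encodes (64 - x) in the T0SZ field, so a
 * 48-bit VA space is programmed as T0SZ = 16; a smaller T0SZ means a
 * larger TTBR0 VA range, which is what the extended idmap relies on.
 */
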
/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

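/*
 * The mirror image of cpu_uninstall_idmap(): park TTBR0_EL1 on the
 * reserved tables, invalidate the TLBs, widen T0SZ if the idmap requires
 * it, and only then install idmap_pg_dir via its linear-map alias.
 */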
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used when the boot CPU is the only
		 * one up (i.e. the cpufeature framework is not up yet) and
		 * later only once CNP has been enabled via the cpufeature
		 * enable() callback. We also rely on the cpu_hwcap bit being
		 * set before the enable() function is called.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}

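/*
 * Note that the switch runs from the idmap: replace_phys() branches to the
 * physical address of idmap_cpu_replace_ttbr1, since the TTBR1 tables being
 * replaced cannot be trusted mid-switch. Typical callers (e.g. paging_init()
 * and kasan_init()) pass lm_alias(swapper_pg_dir) or a temporary pgd.
 */
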
/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while (0)
void check_and_switch_context(struct mm_struct *mm);

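/*
 * A new mm starts out with no ASID assigned (context.id == 0); one is
 * allocated lazily by check_and_switch_context() on the first switch.
 * The pinned refcount tracks outstanding arm64_mm_context_get() pins.
 */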
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

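/*
 * With CONFIG_ARM64_SW_TTBR0_PAN, a shadow copy of the user TTBR0_EL1
 * value (pgd physical address plus ASID) is kept in thread_info so the
 * uaccess routines can restore the user mapping on demand; kernel-only
 * tasks get the reserved zero page instead.
 */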
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = __pa_symbol(empty_zero_page);
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

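/*
 * check_and_switch_context() obtains a valid ASID for @next (allocating a
 * fresh one, and handling a generation rollover if required) before
 * installing the new translation tables.
 */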
static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID may have changed since the last run (following the context
	 * switch of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk, mm)	do { } while (0)
#define activate_mm(prev, next)	switch_mm(prev, next, current)

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

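/*
 * Pin an mm's ASID so that it stays stable across rollover events, for
 * agents that cache ASIDs outside the CPU's TLBs (e.g. an SMMU sharing
 * page tables with the CPU).
 */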
unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */