/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

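/*
 * ASID reserved for the Falkor erratum 1003 workaround, which needs a known
 * ASID value while TTBR0_EL1 is being updated.
 */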
#define FALKOR_RESERVED_ASID	1

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

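/*
 * When CONFIG_PID_IN_CONTEXTIDR is enabled, publish the PID of the incoming
 * task in CONTEXTIDR_EL1 so that external debug and trace tools can identify
 * the thread that is currently running.
 */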
static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = __pa_symbol(empty_zero_page);

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;

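/*
 * Returns true when the ID map needs more VA bits than the kernel's
 * configured VA_BITS, i.e. when idmap_t0sz was reduced at boot time.
 */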
static inline bool __cpu_uses_extended_idmap(void)
{
	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}

/*
 * Set TCR.T0SZ to the given value. This only has an effect when the
 * extended ID map is in use.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	tcr = read_sysreg(tcr_el1);
	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

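/* Helpers selecting the default (VA_BITS-based) or ID-map T0SZ value. */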
#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

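/*
 * Install the identity map in TTBR0_EL1. The reserved page tables are
 * installed and the TLBs invalidated first, mirroring cpu_uninstall_idmap()
 * above, so that no stale or conflicting TTBR0 translations can be fetched
 * while T0SZ is being changed.
 */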
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
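/*
 * The switch itself is performed by idmap_cpu_replace_ttbr1, called through
 * its physical address while the ID map is installed, so that instruction
 * fetches do not depend on the TTBR1_EL1 tables being replaced.
 */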
static inline void cpu_replace_ttbr1(pgd_t *pgd)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	phys_addr_t pgd_phys = virt_to_phys(pgd);

	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(pgd_phys);
	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could workaround this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

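/*
 * Start a new mm with a zero (invalid) context id; a real ASID is allocated
 * lazily by check_and_switch_context() on the first switch to this mm.
 */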
#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

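/*
 * With CONFIG_ARM64_SW_TTBR0_PAN, the kernel runs with the reserved TTBR0 and
 * only restores the user page tables around uaccess. Record the physical pgd
 * and ASID of "mm" in the task's thread_info so the uaccess entry code knows
 * which TTBR0_EL1 value to install.
 */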
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	if (system_uses_ttbr0_pan()) {
		BUG_ON(mm->pgd == swapper_pg_dir);
		task_thread_info(tsk)->ttbr0 =
			virt_to_phys(mm->pgd) | ASID(mm) << 48;
	}
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

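/*
 * Install the user translations of "next" in TTBR0_EL1 on the calling CPU,
 * allocating or revalidating its ASID via check_and_switch_context().
 */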
static inline void __switch_mm(struct mm_struct *next)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}

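/*
 * Context-switch hook called by the core kernel (and via activate_mm). The
 * page table switch is skipped when prev == next, e.g. when switching between
 * threads of the same mm, but the saved TTBR0 may still need refreshing; see
 * below.
 */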
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may not have been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process). Avoid setting the reserved
	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
	 */
	if (next != &init_mm)
		update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, current)

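/*
 * Sanity check run when secondary/late CPUs come up: panics if a CPU
 * implements fewer ASID bits than the boot CPU that the allocator was sized
 * for.
 */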
void verify_cpu_asid_bits(void);

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */