/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>

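/*
 * Many of the helpers below come in a VHE and a non-VHE flavour.
 * hyp_alternate_select() (see asm/kvm_hyp.h) defines a small function
 * that returns a pointer to one of the two implementations; the
 * alternatives framework patches in the right one at boot based on the
 * given cpufeature capability, hence the fn()() double call used at
 * each call site.
 */
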
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}

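/*
 * Trap configuration for running a guest: trace register accesses and
 * FP/SIMD operations are made to trap to the hypervisor so that the
 * FPSIMD state can be switched lazily. With VHE this is done through
 * the CPACR_EL1 view and the hyp vectors are installed in VBAR_EL1;
 * without VHE, CPTR_EL2 is programmed directly.
 */
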
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW)) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/* Make sure we trap PMU access from EL0 to EL2 */
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

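/*
 * Undo the guest trap configuration on exit: re-enable FP/SIMD for the
 * host, restore the host's HCR_EL2 flags and, on VHE, point VBAR_EL1
 * back at the kernel's own exception vectors.
 */
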
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}

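/*
 * Stage-2 translation is switched by loading the guest's VTTBR_EL2
 * (VMID plus stage-2 page table base) on entry and clearing it on exit.
 */
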
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

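/*
 * GIC save/restore: use the GICv3 system register CPU interface when
 * ARM64_HAS_SYSREG_GIC_CPUIF is set, and the GICv2 path otherwise.
 */
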
static hyp_alternate_select(__vgic_call_save_state,
			    __vgic_v2_save_state, __vgic_v3_save_state,
			    ARM64_HAS_SYSREG_GIC_CPUIF);

static hyp_alternate_select(__vgic_call_restore_state,
			    __vgic_v2_restore_state, __vgic_v3_restore_state,
			    ARM64_HAS_SYSREG_GIC_CPUIF);

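/*
 * While the guest runs, HCR_INT_OVERRIDE routes IRQs/FIQs to EL2, and
 * any virtual interrupt lines pended by the host (vcpu->arch.irq_lines)
 * are set in HCR_EL2 as well; the override is dropped again when the
 * state is saved.
 */
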
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	__vgic_call_save_state()(vcpu);
	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	__vgic_call_restore_state()(vcpu);
}

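/*
 * Boot-time selected predicate: __check_arm_834220()() returns true only
 * on CPUs affected by Cortex-A57 erratum 834220, where HPFAR_EL2 cannot
 * always be trusted (see __populate_fault_info() below).
 */
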
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

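/*
 * Recompute the faulting IPA from the guest virtual address: perform a
 * stage-1 translation with AT S1E1R and convert the resulting PAR_EL1
 * value into HPFAR_EL2 format.
 */
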
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

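/*
 * Snapshot the fault syndrome (ESR, FAR, HPFAR) for the exit handler.
 * Returning false means the IPA could not be resolved and the guest
 * must be re-entered so the fault can be taken again.
 */
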
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

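/*
 * The world switch: save the host state, install the guest's traps and
 * stage-2 translation, restore the guest's GIC, timer, 32-bit and system
 * register state, then enter the guest. On exit the sequence is reversed,
 * the FPSIMD registers are switched back only if the guest actually
 * touched them, and the exit code is handed back to the host.
 */
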
static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, tpidr_el2);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

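/*
 * Without VHE, panic() cannot be called directly from hyp context, so the
 * string and registers are handed to __hyp_do_panic, which returns to the
 * host before panicking. With VHE the kernel already runs at EL2 and
 * panic() can be invoked in place.
 */
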
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par,
		       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par,
	      (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

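/*
 * A non-zero VTTBR_EL2 means we panicked while a guest was loaded:
 * deactivate its traps and stage-2 translation and restore the host
 * context before reporting the panic.
 */
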
void __hyp_text __noreturn __hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par);

	unreachable();
}