// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
#include "vgic/vgic.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	return undef_access(vcpu, params, r);
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}

#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}

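/*
 * PURE_EL2_SYSREG entries have no EL1 counterpart: the mapping returns
 * the register itself, so accesses always hit the in-memory copy.
 * MAPPED_EL2_SYSREG entries name the EL1 register that shadows the EL2
 * one while a VHE guest hypervisor runs, together with an optional
 * format translation (fn) for the non-VHE case.
 */
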
static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
		PURE_EL2_SYSREG( VPIDR_EL2 );
		PURE_EL2_SYSREG( VMPIDR_EL2 );
		PURE_EL2_SYSREG( ACTLR_EL2 );
		PURE_EL2_SYSREG( HCR_EL2 );
		PURE_EL2_SYSREG( MDCR_EL2 );
		PURE_EL2_SYSREG( HSTR_EL2 );
		PURE_EL2_SYSREG( HACR_EL2 );
		PURE_EL2_SYSREG( VTTBR_EL2 );
		PURE_EL2_SYSREG( VTCR_EL2 );
		PURE_EL2_SYSREG( RVBAR_EL2 );
		PURE_EL2_SYSREG( TPIDR_EL2 );
		PURE_EL2_SYSREG( HPFAR_EL2 );
		PURE_EL2_SYSREG( HCRX_EL2 );
		PURE_EL2_SYSREG( HFGRTR_EL2 );
		PURE_EL2_SYSREG( HFGWTR_EL2 );
		PURE_EL2_SYSREG( HFGITR_EL2 );
		PURE_EL2_SYSREG( HDFGRTR_EL2 );
		PURE_EL2_SYSREG( HDFGWTR_EL2 );
		PURE_EL2_SYSREG( HAFGRTR_EL2 );
		PURE_EL2_SYSREG( CNTVOFF_EL2 );
		PURE_EL2_SYSREG( CNTHCTL_EL2 );
		MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
				  translate_sctlr_el2_to_sctlr_el1 );
		MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
				  translate_cptr_el2_to_cpacr_el1 );
		MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
				  translate_ttbr0_el2_to_ttbr0_el1 );
		MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
		MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
				  translate_tcr_el2_to_tcr_el1 );
		MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
		MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
		MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
		MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
		MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
		MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
		MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
		MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
		MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
		MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
		MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
		MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
		MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
		MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
		MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
	default:
		return false;
	}
}

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * CNTHCTL_EL2 requires some special treatment to
		 * account for the bits that can be set via CNTKCTL_EL1.
		 */
		switch (reg) {
		case CNTHCTL_EL2:
			if (vcpu_el2_e2h_is_set(vcpu)) {
				val = read_sysreg_el1(SYS_CNTKCTL);
				val &= CNTKCTL_VALID_BITS;
				val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
				return val;
			}
			break;
		}

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and the sysreg requires
		 * translation to be used at EL1, use the in-memory
		 * copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		if (reg >= __SANITISED_REG_START__)
			val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_assign_sys_reg(vcpu, reg, val);

		switch (reg) {
		case CNTHCTL_EL2:
			/*
			 * If E2H=0, CNTHCTL_EL2 is a pure shadow register.
			 * Otherwise, some of the bits are backed by
			 * CNTKCTL_EL1, while the rest is kept in memory.
			 * Yes, this is fun stuff.
			 */
			if (vcpu_el2_e2h_is_set(vcpu))
				write_sysreg_el1(val, SYS_CNTKCTL);
			return;
		}

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_assign_sys_reg(vcpu, reg, val);
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

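/*
 * Example of the LineSize encoding used above: CCSIDR_EL1.LineSize
 * holds Log2(line size in bytes) - 4, so a 64-byte line (Log2 = 6) is
 * encoded as 2, and the smallest architected line size (16 bytes) as 0.
 */
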
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm))
		return undef_access(vcpu, p, r);

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

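/*
 * The aarch32_map hint lets a single 64-bit shadow register back both
 * halves of an AArch32 view: AA32_LO selects bits [31:0], AA32_HI
 * selects bits [63:32], and the default is the whole 64-bit register.
 */
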
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (p->is_write)
		return ignore_write(vcpu, p);

	if (p->Op1 == 4) {	/* ICC_SRE_EL2 */
		p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE |
			     ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB);
	} else {		/* ICC_SRE_EL1 */
		p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	}

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
		return undef_access(vcpu, p, r);

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_debug_handle_oslar(vcpu, p->regval);
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);

	kvm_debug_set_guest_ownership(vcpu);
	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
	struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;

	switch (rd->Op2) {
	case 0b100:
		return &dbg->dbg_bvr[rd->CRm];
	case 0b101:
		return &dbg->dbg_bcr[rd->CRm];
	case 0b110:
		return &dbg->dbg_wvr[rd->CRm];
	case 0b111:
		return &dbg->dbg_wcr[rd->CRm];
	default:
		KVM_BUG_ON(1, vcpu->kvm);
		return NULL;
	}
}

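/*
 * The demux above works because of how the DBG{B,W}{V,C}Rn_EL1
 * encodings are laid out: CRm carries the breakpoint/watchpoint number
 * n, while Op2 distinguishes BVR (0b100), BCR (0b101), WVR (0b110) and
 * WCR (0b111).
 */
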
static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return false;

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, reg);
	else
		dbg_to_reg(vcpu, p, rd, reg);

	kvm_debug_set_guest_ownership(vcpu);
	return true;
}

static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  u64 val)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return -EINVAL;

	*reg = val;
	return 0;
}

static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  u64 *val)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return -EINVAL;

	*val = *reg;
	return 0;
}

static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	/*
	 * Bail early if we couldn't find storage for the register, the
	 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
	 * being run.
	 */
	if (!reg)
		return 0;

	*reg = rd->val;
	return rd->val;
}

static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}

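/*
 * Worked example of the mapping above: vcpu_id 0x123 yields Aff0 = 0x3
 * (0x123 & 0xf), Aff1 = 0x12 (0x123 >> 4), Aff2 = 0x0 (0x123 >> 12),
 * plus bit 31 (RES1), i.e. MPIDR_EL1 = 0x80001203.
 */
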
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.nr_pmu_counters;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

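/*
 * Each helper above mirrors one of the PMUSERENR_EL0 enable bits: an
 * EL0 access is allowed if any of the requested bits is set or if the
 * vcpu is running in a privileged mode; otherwise the access UNDEFs,
 * as it would on real hardware.
 */
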
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & PMSELR_EL0_SEL_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

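/*
 * PMCR_EL0.N is the number of implemented event counters, so valid
 * event counter indices are 0..N-1; the cycle counter sits at the
 * fixed index ARMV8_PMU_CYCLE_IDX outside that range.
 */
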
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	kvm_pmu_set_counter_value_user(vcpu, idx, val);
	return 0;
}

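/*
 * The index decode above relies on the architected PMEVCNTRn_EL0
 * encoding, where the counter number n is split as CRm[1:0]:Op2[2:0].
 * For instance, PMEVCNTR11_EL0 has CRm = 0b1001 and Op2 = 0b011,
 * giving idx = ((1 << 3) | 3) = 11.
 */
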
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_accessible_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1)
			/* accessing PMCNTENSET_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
		else
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);

		kvm_pmu_reprogram_counter_mask(vcpu, val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_accessible_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return undef_access(vcpu, p, r);

		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
				      (p->regval & ARMV8_PMU_USERENR_MASK));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    !vcpu_has_nv(vcpu) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.nr_pmu_counters = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_assign_sys_reg(vcpu, r->reg, val);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .set_user = set_pmu_evcntr,					\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTV_TVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_AARCH32_CNTP_TVAL:
	case SYS_CNTP_TVAL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTV_TVAL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTHP_TVAL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTHV_TVAL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTP_CTL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTV_CTL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_AARCH32_CNTP_CTL:
	case SYS_CNTP_CTL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTV_CTL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTHP_CTL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTHV_CTL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTP_CVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTV_CVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_AARCH32_CNTP_CVAL:
	case SYS_CNTP_CVAL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTV_CVAL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTHP_CVAL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTHV_CVAL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (is_hyp_ctxt(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_AARCH32_CNTPCT:
	case SYS_AARCH32_CNTPCTSS:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_CNTVCT_EL0:
	case SYS_CNTVCTSS_EL0:
		if (is_hyp_ctxt(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_AARCH32_CNTVCT:
	case SYS_AARCH32_CNTVCTSS:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CNT;
		break;

	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		return undef_access(vcpu, p, r);
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static bool access_hv_timer(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (!vcpu_el2_e2h_is_set(vcpu))
		return undef_access(vcpu, p, r);

	return access_arch_timer(vcpu, p, r);
}

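/*
 * The is_hyp_ctxt() checks above mirror the FEAT_VHE register
 * redirection: a guest hypervisor running at vEL2 with HCR_EL2.E2H set
 * sees its CNT{P,V}_* accesses operate on the EL2 timers rather than
 * the EL1 ones.
 */
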
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

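/*
 * Forcing FTR_LOWER_SAFE for PMUVer/DebugVer/PerfMon means userspace
 * may set any value lower than (or equal to) KVM's sanitised limit for
 * these fields, i.e. it can advertise an older version of the feature
 * to the guest than the host implements.
 */
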
/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}

3d0dba57 MZ |
1568 | static u8 pmuver_to_perfmon(u8 pmuver) |
1569 | { | |
1570 | switch (pmuver) { | |
1571 | case ID_AA64DFR0_EL1_PMUVer_IMP: | |
753d734f | 1572 | return ID_DFR0_EL1_PerfMon_PMUv3; |
3d0dba57 | 1573 | case ID_AA64DFR0_EL1_PMUVer_IMP_DEF: |
753d734f | 1574 | return ID_DFR0_EL1_PerfMon_IMPDEF; |
3d0dba57 MZ |
1575 | default: |
1576 | /* Anything ARMv8.1+ and NI have the same value. For now. */ | |
1577 | return pmuver; | |
1578 | } | |
1579 | } | |
1580 | ||
7da540e2 JM |
1581 | static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val); |
1582 | static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val); | |
1583 | ||
93390c0a | 1584 | /* Read a sanitised cpufeature ID register by sys_reg_desc */ |
d86cde6e JZ |
1585 | static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, |
1586 | const struct sys_reg_desc *r) | |
93390c0a | 1587 | { |
7ba8b438 | 1588 | u32 id = reg_to_encoding(r); |
00d5101b AE |
1589 | u64 val; |
1590 | ||
cdd5036d | 1591 | if (sysreg_visible_as_raz(vcpu, r)) |
00d5101b AE |
1592 | return 0; |
1593 | ||
1594 | val = read_sanitised_ftr_reg(id); | |
93390c0a | 1595 | |
c8857935 | 1596 | switch (id) { |
7da540e2 JM |
1597 | case SYS_ID_AA64DFR0_EL1: |
1598 | val = sanitise_id_aa64dfr0_el1(vcpu, val); | |
1599 | break; | |
1600 | case SYS_ID_AA64PFR0_EL1: | |
1601 | val = sanitise_id_aa64pfr0_el1(vcpu, val); | |
1602 | break; | |
c8857935 | 1603 | case SYS_ID_AA64PFR1_EL1: |
fe21ff5d | 1604 | if (!kvm_has_mte(vcpu->kvm)) { |
6ca2b9ca | 1605 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); |
fe21ff5d BH |
1606 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac); |
1607 | } | |
90807748 | 1608 | |
6ca2b9ca | 1609 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); |
ffe68b2d SH |
1610 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap); |
1611 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); | |
ffe68b2d SH |
1612 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS); |
1613 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE); | |
1614 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX); | |
1615 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2); | |
1616 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR); | |
6685f5d5 | 1617 | val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac); |
c8857935 | 1618 | break; |
13c7a51e MZ |
1619 | case SYS_ID_AA64PFR2_EL1: |
1620 | /* We only expose FPMR */ | |
1621 | val &= ID_AA64PFR2_EL1_FPMR; | |
1622 | break; | |
c8857935 MZ |
1623 | case SYS_ID_AA64ISAR1_EL1: |
1624 | if (!vcpu_has_ptrauth(vcpu)) | |
aa50479b MB |
1625 | val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | |
1626 | ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | | |
1627 | ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | | |
1628 | ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI)); | |
c8857935 | 1629 | break; |
def8c222 VM |
1630 | case SYS_ID_AA64ISAR2_EL1: |
1631 | if (!vcpu_has_ptrauth(vcpu)) | |
b2d71f27 MB |
1632 | val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | |
1633 | ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); | |
0bc9a9e8 MZ |
1634 | if (!cpus_have_final_cap(ARM64_HAS_WFXT) || |
1635 | has_broken_cntvoff()) | |
b2d71f27 | 1636 | val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); |
def8c222 | 1637 | break; |
fd22af17 MB |
1638 | case SYS_ID_AA64ISAR3_EL1: |
1639 | val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX; | |
1640 | break; | |
bf48040c AO |
1641 | case SYS_ID_AA64MMFR2_EL1: |
1642 | val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK; | |
9d674557 | 1643 | val &= ~ID_AA64MMFR2_EL1_NV; |
bf48040c | 1644 | break; |
70ed7238 | 1645 | case SYS_ID_AA64MMFR3_EL1: |
d4a89e5a MB |
1646 | val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE | |
1647 | ID_AA64MMFR3_EL1_S1PIE; | |
70ed7238 | 1648 | break; |
bf48040c AO |
1649 | case SYS_ID_MMFR4_EL1: |
1650 | val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX); | |
1651 | break; | |
07d79fe7 DM |
1652 | } |
1653 | ||
94f296dc MZ |
1654 | if (vcpu_has_nv(vcpu)) |
1655 | val = limit_nv_id_reg(vcpu->kvm, id, val); | |
1656 | ||
07d79fe7 | 1657 | return val; |
93390c0a DM |
1658 | } |
1659 | ||
d86cde6e JZ |
1660 | static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu, |
1661 | const struct sys_reg_desc *r) | |
1662 | { | |
1663 | return __kvm_read_sanitised_id_reg(vcpu, r); | |
1664 | } | |
1665 | ||
1666 | static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | |
1667 | { | |
97ca3fcc | 1668 | return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r)); |
d86cde6e JZ |
1669 | } |
1670 | ||
e0163337 OU |
1671 | static bool is_feature_id_reg(u32 encoding) |
1672 | { | |
1673 | return (sys_reg_Op0(encoding) == 3 && | |
1674 | (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) && | |
1675 | sys_reg_CRn(encoding) == 0 && | |
1676 | sys_reg_CRm(encoding) <= 7); | |
1677 | } | |
1678 | ||
47334146 JZ |
1679 | /* |
1680 | * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is | |
592efc60 OU |
1681 | * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID |
1682 | * registers KVM maintains on a per-VM basis. | |
b4043e7c SO |
1683 | * |
1684 | * Additionally, the implementation ID registers and CTR_EL0 are handled as | |
1685 | * per-VM registers. | |
47334146 | 1686 | */ |
592efc60 | 1687 | static inline bool is_vm_ftr_id_reg(u32 id) |
47334146 | 1688 | { |
b4043e7c SO |
1689 | switch (id) { |
1690 | case SYS_CTR_EL0: | |
1691 | case SYS_MIDR_EL1: | |
1692 | case SYS_REVIDR_EL1: | |
1693 | case SYS_AIDR_EL1: | |
2843cae2 | 1694 | return true; |
b4043e7c SO |
1695 | default: |
1696 | return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 && | |
1697 | sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 && | |
1698 | sys_reg_CRm(id) < 8); | |
2843cae2 | 1699 | |
b4043e7c | 1700 | } |
47334146 JZ |
1701 | } |
1702 | ||
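/*
 * Worked example (illustrative): ID_AA64PFR0_EL1 encodes as (Op0=3, Op1=0,
 * CRn=0, CRm=4, Op2=0). CRm=4 lands in the [1, 8) window checked above, so
 * is_vm_ftr_id_reg() returns true and the register is tracked once per VM
 * rather than per vCPU.
 */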
e0163337 OU |
1703 | static inline bool is_vcpu_ftr_id_reg(u32 id) |
1704 | { | |
1705 | return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id); | |
1706 | } | |
1707 | ||
3f9cd0ca JZ |
1708 | static inline bool is_aa32_id_reg(u32 id) |
1709 | { | |
1710 | return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 && | |
1711 | sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 && | |
1712 | sys_reg_CRm(id) <= 3); | |
1713 | } | |
1714 | ||
912dee57 AJ |
1715 | static unsigned int id_visibility(const struct kvm_vcpu *vcpu, |
1716 | const struct sys_reg_desc *r) | |
1717 | { | |
7ba8b438 | 1718 | u32 id = reg_to_encoding(r); |
c512298e AJ |
1719 | |
1720 | switch (id) { | |
1721 | case SYS_ID_AA64ZFR0_EL1: | |
1722 | if (!vcpu_has_sve(vcpu)) | |
1723 | return REG_RAZ; | |
1724 | break; | |
1725 | } | |
1726 | ||
912dee57 AJ |
1727 | return 0; |
1728 | } | |
1729 | ||
d5efec7e OU |
1730 | static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu, |
1731 | const struct sys_reg_desc *r) | |
1732 | { | |
1733 | /* | |
1734 | * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any | |
1735 | * EL. Promote to RAZ/WI in order to guarantee consistency between | |
1736 | * systems. | |
1737 | */ | |
1738 | if (!kvm_supports_32bit_el0()) | |
1739 | return REG_RAZ | REG_USER_WI; | |
1740 | ||
1741 | return id_visibility(vcpu, r); | |
1742 | } | |
1743 | ||
34b4d203 OU |
1744 | static unsigned int raz_visibility(const struct kvm_vcpu *vcpu, |
1745 | const struct sys_reg_desc *r) | |
1746 | { | |
1747 | return REG_RAZ; | |
1748 | } | |
1749 | ||
93390c0a DM |
1750 | /* cpufeature ID register access trap handlers */ |
1751 | ||
93390c0a DM |
1752 | static bool access_id_reg(struct kvm_vcpu *vcpu, |
1753 | struct sys_reg_params *p, | |
1754 | const struct sys_reg_desc *r) | |
1755 | { | |
4782ccc8 OU |
1756 | if (p->is_write) |
1757 | return write_to_read_only(vcpu, p, r); | |
1758 | ||
cdd5036d | 1759 | p->regval = read_id_reg(vcpu, r); |
9f75b6d4 | 1760 | |
4782ccc8 | 1761 | return true; |
93390c0a DM |
1762 | } |
1763 | ||
73433762 DM |
1764 | /* Visibility overrides for SVE-specific control registers */ |
1765 | static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, | |
1766 | const struct sys_reg_desc *rd) | |
1767 | { | |
1768 | if (vcpu_has_sve(vcpu)) | |
1769 | return 0; | |
1770 | ||
01fe5ace | 1771 | return REG_HIDDEN; |
73433762 DM |
1772 | } |
1773 | ||
b5568894 MZ |
1774 | static unsigned int sme_visibility(const struct kvm_vcpu *vcpu, |
1775 | const struct sys_reg_desc *rd) | |
1776 | { | |
1777 | if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP)) | |
1778 | return 0; | |
1779 | ||
1780 | return REG_HIDDEN; | |
1781 | } | |
1782 | ||
7d9c1ed6 MZ |
1783 | static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu, |
1784 | const struct sys_reg_desc *rd) | |
1785 | { | |
1786 | if (kvm_has_fpmr(vcpu->kvm)) | |
1787 | return 0; | |
1788 | ||
1789 | return REG_HIDDEN; | |
1790 | } | |
1791 | ||
7da540e2 | 1792 | static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val) |
23711a5e | 1793 | { |
c39f5974 JZ |
1794 | if (!vcpu_has_sve(vcpu)) |
1795 | val &= ~ID_AA64PFR0_EL1_SVE_MASK; | |
23711a5e MZ |
1796 | |
1797 | /* | |
c39f5974 JZ |
1798 | * The default is to expose CSV2 == 1 if the HW isn't affected. |
1799 | * Although this is a per-CPU feature, we make it global because | |
1800 | * asymmetric systems are just a nuisance. | |
1801 | * | |
1802 | * Userspace can override this as long as it doesn't promise | |
1803 | * the impossible. | |
23711a5e | 1804 | */ |
c39f5974 JZ |
1805 | if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) { |
1806 | val &= ~ID_AA64PFR0_EL1_CSV2_MASK; | |
1807 | val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP); | |
1808 | } | |
1809 | if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) { | |
1810 | val &= ~ID_AA64PFR0_EL1_CSV3_MASK; | |
1811 | val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP); | |
1812 | } | |
23711a5e | 1813 | |
c39f5974 JZ |
1814 | if (kvm_vgic_global_state.type == VGIC_V3) { |
1815 | val &= ~ID_AA64PFR0_EL1_GIC_MASK; | |
1816 | val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP); | |
1817 | } | |
4f1df628 | 1818 | |
c39f5974 | 1819 | val &= ~ID_AA64PFR0_EL1_AMU_MASK; |
23711a5e | 1820 | |
6685f5d5 JM |
1821 | /* |
1822 | * MPAM is disabled by default as KVM also needs a set of PARTID to | |
1823 | * program the MPAMVPMx_EL2 PARTID remapping registers with. But some | |
1824 | * older kernels let the guest see the ID bit. | |
1825 | */ | |
1826 | val &= ~ID_AA64PFR0_EL1_MPAM_MASK; | |
1827 | ||
c39f5974 JZ |
1828 | return val; |
1829 | } | |
23711a5e | 1830 | |
7da540e2 | 1831 | static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val) |
c118cead | 1832 | { |
9f9917bc | 1833 | val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8); |
c118cead JZ |
1834 | |
1835 | /* | |
1836 | * Only initialize the PMU version if the vCPU was configured with one. | |
1837 | */ | |
1838 | val &= ~ID_AA64DFR0_EL1_PMUVer_MASK; | |
1839 | if (kvm_vcpu_has_pmu(vcpu)) | |
1840 | val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer, | |
1841 | kvm_arm_pmu_get_pmuver_limit()); | |
1842 | ||
1843 | /* Hide SPE from guests */ | |
1844 | val &= ~ID_AA64DFR0_EL1_PMSVer_MASK; | |
1845 | ||
a7f1fa55 MR |
1846 | /* Hide BRBE from guests */ |
1847 | val &= ~ID_AA64DFR0_EL1_BRBE_MASK; | |
1848 | ||
c118cead | 1849 | return val; |
23711a5e MZ |
1850 | } |
1851 | ||
60e651ff MZ |
1852 | static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, |
1853 | const struct sys_reg_desc *rd, | |
1854 | u64 val) | |
1855 | { | |
a9bc4a1c | 1856 | u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val); |
c118cead | 1857 | u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); |
60e651ff MZ |
1858 | |
1859 | /* | |
f90f9360 OU |
1860 | * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the |
1861 | * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously | |
1862 | * exposed an IMP_DEF PMU to userspace and the guest on systems w/ | |
1863 | * non-architectural PMUs. Of course, PMUv3 is the only game in town for | |
1864 | * PMU virtualization, so the IMP_DEF value was rather user-hostile. | |
1865 | * | |
1866 | * At minimum, we're on the hook to allow values that were given to | |
1867 | * userspace by KVM. Cover our tracks here and replace the IMP_DEF value | |
1868 | * with a more sensible NI. The value of an ID register changing under | |
1869 | * the nose of the guest is unfortunate, but is certainly no more | |
1870 | * surprising than an ill-guided PMU driver poking at impdef system | |
1871 | * registers that end in an UNDEF... | |
60e651ff | 1872 | */ |
68667240 | 1873 | if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) |
f90f9360 | 1874 | val &= ~ID_AA64DFR0_EL1_PMUVer_MASK; |
60e651ff | 1875 | |
a9bc4a1c OU |
1876 | /* |
1877 | * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a | |
1878 | * nonzero minimum safe value. | |
1879 | */ | |
1880 | if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP) | |
1881 | return -EINVAL; | |
1882 | ||
68667240 | 1883 | return set_id_reg(vcpu, rd, val); |
c118cead | 1884 | } |
60e651ff | 1885 | |
c118cead JZ |
1886 | static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, |
1887 | const struct sys_reg_desc *rd) | |
1888 | { | |
be5ccac3 | 1889 | u8 perfmon; |
c118cead | 1890 | u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1); |
60e651ff | 1891 | |
c118cead | 1892 | val &= ~ID_DFR0_EL1_PerfMon_MASK; |
be5ccac3 AO |
1893 | if (kvm_vcpu_has_pmu(vcpu)) { |
1894 | perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit()); | |
c118cead | 1895 | val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon); |
be5ccac3 | 1896 | } |
60e651ff | 1897 | |
9f9917bc OU |
1898 | val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8); |
1899 | ||
c118cead | 1900 | return val; |
60e651ff MZ |
1901 | } |
1902 | ||
d82e0dfd MZ |
1903 | static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, |
1904 | const struct sys_reg_desc *rd, | |
1905 | u64 val) | |
1906 | { | |
c118cead | 1907 | u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val); |
a9bc4a1c | 1908 | u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val); |
d82e0dfd | 1909 | |
f90f9360 OU |
1910 | if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) { |
1911 | val &= ~ID_DFR0_EL1_PerfMon_MASK; | |
1912 | perfmon = 0; | |
1913 | } | |
d82e0dfd MZ |
1914 | |
1915 | /* | |
1916 | * Allow DFR0_EL1.PerfMon to be set from userspace as long as | |
1917 | * it doesn't promise more than what the HW gives us on the | |
1918 | * AArch64 side (as everything is emulated with that), and | |
1919 | * that this is a PMUv3. | |
1920 | */ | |
c118cead | 1921 | if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3) |
d82e0dfd MZ |
1922 | return -EINVAL; |
1923 | ||
a9bc4a1c OU |
1924 | if (copdbg < ID_DFR0_EL1_CopDbg_Armv8) |
1925 | return -EINVAL; | |
1926 | ||
68667240 | 1927 | return set_id_reg(vcpu, rd, val); |
d82e0dfd MZ |
1928 | } |
1929 | ||
7da540e2 | 1930 | static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, |
6685f5d5 | 1931 | const struct sys_reg_desc *rd, u64 user_val) |
7da540e2 | 1932 | { |
6685f5d5 JM |
1933 | u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
1934 | u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK; | |
1935 | ||
1936 | /* | |
1937 | * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits | |
1938 | * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to | |
1939 | * guests, but didn't add trap handling. KVM doesn't support MPAM and | |
1940 | * always returns an UNDEF for these registers. The guest must see 0 | |
1941 | * for this field. | |
1942 | * | |
1943 | * But KVM must also accept values from user-space that were provided | |
1944 | * by KVM. On CPUs that support MPAM, permit user-space to write | |
1945 | * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field. | |
1946 | */ | |
1947 | if ((hw_val & mpam_mask) == (user_val & mpam_mask)) | |
1948 | user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK; | |
1949 | ||
7af7cfbe MZ |
1950 | /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */ |
1951 | if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) || | |
1952 | !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) || | |
1953 | (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val))) | |
1954 | return -EINVAL; | |
1955 | ||
6685f5d5 JM |
1956 | return set_id_reg(vcpu, rd, user_val); |
1957 | } | |
1958 | ||
1959 | static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, | |
1960 | const struct sys_reg_desc *rd, u64 user_val) | |
1961 | { | |
1962 | u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); | |
1963 | u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK; | |
fe21ff5d BH |
1964 | u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val); |
1965 | u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val); | |
1966 | u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val); | |
6685f5d5 JM |
1967 | |
1968 | /* See set_id_aa64pfr0_el1 for comment about MPAM */ | |
1969 | if ((hw_val & mpam_mask) == (user_val & mpam_mask)) | |
1970 | user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK; | |
1971 | ||
fe21ff5d BH |
1972 | /* |
1973 | * Previously MTE_frac was hidden from guest. However, if the | |
1974 | * hardware supports MTE2 but not MTE_ASYM_FAULT then a value | |
1975 | * of 0 for this field indicates that the hardware supports | |
1976 | * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported. | |
1977 | * | |
1978 | * As KVM must accept values from user-space that were previously | |
1979 | * provided by KVM, when ID_AA64PFR1_EL1.MTE is 2, allow user-space to set | |
1980 | * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid | |
1981 | * incorrectly claiming hardware support for MTE_ASYNC in the | |
1982 | * guest. | |
1983 | */ | |
1984 | ||
1985 | if (mte == ID_AA64PFR1_EL1_MTE_MTE2 && | |
1986 | hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI && | |
1987 | user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) { | |
1988 | user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK; | |
1989 | user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK; | |
1990 | } | |
1991 | ||
6685f5d5 | 1992 | return set_id_reg(vcpu, rd, user_val); |
7da540e2 JM |
1993 | } |
1994 | ||
3f1e0727 SO |
1995 | static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu, |
1996 | const struct sys_reg_desc *rd, u64 user_val) | |
1997 | { | |
1998 | u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd); | |
1999 | u64 tgran2_mask = ID_AA64MMFR0_EL1_TGRAN4_2_MASK | | |
2000 | ID_AA64MMFR0_EL1_TGRAN16_2_MASK | | |
2001 | ID_AA64MMFR0_EL1_TGRAN64_2_MASK; | |
2002 | ||
2003 | if (vcpu_has_nv(vcpu) && | |
2004 | ((sanitized_val & tgran2_mask) != (user_val & tgran2_mask))) | |
2005 | return -EINVAL; | |
2006 | ||
2007 | return set_id_reg(vcpu, rd, user_val); | |
2008 | } | |
2009 | ||
9d674557 MZ |
2010 | static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu, |
2011 | const struct sys_reg_desc *rd, u64 user_val) | |
2012 | { | |
2013 | u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); | |
2014 | u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK; | |
2015 | ||
2016 | /* | |
2017 | * We made the mistake of exposing the now deprecated NV field, | |
2018 | * so allow userspace to write it, but silently ignore it. | |
2019 | */ | |
2020 | if ((hw_val & nv_mask) == (user_val & nv_mask)) | |
2021 | user_val &= ~nv_mask; | |
2022 | ||
2023 | return set_id_reg(vcpu, rd, user_val); | |
2024 | } | |
2025 | ||
e9b57d7f SK |
2026 | static int set_ctr_el0(struct kvm_vcpu *vcpu, |
2027 | const struct sys_reg_desc *rd, u64 user_val) | |
2028 | { | |
2029 | u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val); | |
2030 | ||
2031 | /* | |
2032 | * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved. | |
2033 | * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based | |
2034 | * on what hardware reports. | |
2035 | * | |
2036 | * Using a VIPT software model on PIPT will lead to over-invalidation, | |
2037 | * but is still correct. Hence, we can allow downgrading PIPT to VIPT, | |
2038 | * but not the other way around. This is handled via arm64_ftr_safe_value() | |
2039 | * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value | |
2040 | * set as VIPT. | |
2041 | */ | |
2042 | switch (user_L1Ip) { | |
2043 | case CTR_EL0_L1Ip_RESERVED_VPIPT: | |
2044 | case CTR_EL0_L1Ip_RESERVED_AIVIVT: | |
2045 | return -EINVAL; | |
2046 | case CTR_EL0_L1Ip_VIPT: | |
2047 | case CTR_EL0_L1Ip_PIPT: | |
2048 | return set_id_reg(vcpu, rd, user_val); | |
2049 | default: | |
2050 | return -ENOENT; | |
2051 | } | |
2052 | } | |
2053 | ||
93390c0a DM |
2054 | /* |
2055 | * cpufeature ID register user accessors | |
2056 | * | |
2057 | * For now, these registers are immutable for userspace, so no values | |
2058 | * are stored, and for set_id_reg() we don't allow the effective value | |
2059 | * to be changed. | |
2060 | */ | |
93390c0a | 2061 | static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
978ceeb3 | 2062 | u64 *val) |
93390c0a | 2063 | { |
6db7af0d OU |
2064 | /* |
2065 | * Avoid locking if the VM has already started, as the ID registers are | |
2066 | * guaranteed to be invariant at that point. | |
2067 | */ | |
2068 | if (kvm_vm_has_ran_once(vcpu->kvm)) { | |
2069 | *val = read_id_reg(vcpu, rd); | |
2070 | return 0; | |
2071 | } | |
2072 | ||
2073 | mutex_lock(&vcpu->kvm->arch.config_lock); | |
cdd5036d | 2074 | *val = read_id_reg(vcpu, rd); |
6db7af0d OU |
2075 | mutex_unlock(&vcpu->kvm->arch.config_lock); |
2076 | ||
4782ccc8 | 2077 | return 0; |
93390c0a DM |
2078 | } |
2079 | ||
2080 | static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | |
978ceeb3 | 2081 | u64 val) |
93390c0a | 2082 | { |
2e8bf0cb JZ |
2083 | u32 id = reg_to_encoding(rd); |
2084 | int ret; | |
4782ccc8 | 2085 | |
2e8bf0cb JZ |
2086 | mutex_lock(&vcpu->kvm->arch.config_lock); |
2087 | ||
2088 | /* | |
2089 | * Once the VM has started the ID registers are immutable. Reject any | |
2090 | * write that does not match the final register value. | |
2091 | */ | |
2092 | if (kvm_vm_has_ran_once(vcpu->kvm)) { | |
2093 | if (val != read_id_reg(vcpu, rd)) | |
2094 | ret = -EBUSY; | |
2095 | else | |
2096 | ret = 0; | |
2097 | ||
2098 | mutex_unlock(&vcpu->kvm->arch.config_lock); | |
2099 | return ret; | |
2100 | } | |
2101 | ||
2102 | ret = arm64_check_features(vcpu, rd, val); | |
2103 | if (!ret) | |
d7508d27 | 2104 | kvm_set_vm_id_reg(vcpu->kvm, id, val); |
2e8bf0cb JZ |
2105 | |
2106 | mutex_unlock(&vcpu->kvm->arch.config_lock); | |
2107 | ||
2108 | /* | |
2109 | * arm64_check_features() returns -E2BIG to indicate the register's | |
2110 | * feature set is a superset of the maximally-allowed register value. | |
2111 | * While it would be nice to precisely describe this to userspace, the | |
2112 | * existing UAPI for KVM_SET_ONE_REG has it that invalid register | |
2113 | * writes return -EINVAL. | |
2114 | */ | |
2115 | if (ret == -E2BIG) | |
2116 | ret = -EINVAL; | |
2117 | return ret; | |
93390c0a DM |
2118 | } |
2119 | ||
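/*
 * Hypothetical userspace sketch (the helper name and vcpu_fd plumbing are
 * editorial assumptions, not part of this file): a KVM_SET_ONE_REG write
 * whose feature set exceeds the sanitised limit comes back as EINVAL,
 * per the -E2BIG conversion in set_id_reg() above.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int write_id_aa64pfr0(int vcpu_fd, __u64 val)
 *	{
 *		struct kvm_one_reg reg = {
 *			.id   = ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
 *			.addr = (__u64)&val,
 *		};
 *
 *		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *	}
 */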
d7508d27 OU |
2120 | void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val) |
2121 | { | |
2122 | u64 *p = __vm_id_reg(&kvm->arch, reg); | |
2123 | ||
2124 | lockdep_assert_held(&kvm->arch.config_lock); | |
2125 | ||
2126 | if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm)) | |
2127 | return; | |
2128 | ||
2129 | *p = val; | |
2130 | } | |
2131 | ||
5a430976 | 2132 | static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
978ceeb3 | 2133 | u64 *val) |
5a430976 | 2134 | { |
978ceeb3 MZ |
2135 | *val = 0; |
2136 | return 0; | |
5a430976 AE |
2137 | } |
2138 | ||
7a3ba309 | 2139 | static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
978ceeb3 | 2140 | u64 val) |
7a3ba309 | 2141 | { |
7a3ba309 MZ |
2142 | return 0; |
2143 | } | |
2144 | ||
f7f2b15c AB |
2145 | static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
2146 | const struct sys_reg_desc *r) | |
2147 | { | |
2148 | if (p->is_write) | |
2149 | return write_to_read_only(vcpu, p, r); | |
2150 | ||
2843cae2 | 2151 | p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0); |
f7f2b15c AB |
2152 | return true; |
2153 | } | |
2154 | ||
2155 | static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |
2156 | const struct sys_reg_desc *r) | |
2157 | { | |
2158 | if (p->is_write) | |
2159 | return write_to_read_only(vcpu, p, r); | |
2160 | ||
7af0c253 | 2161 | p->regval = __vcpu_sys_reg(vcpu, r->reg); |
f7f2b15c AB |
2162 | return true; |
2163 | } | |
2164 | ||
7af0c253 AO |
2165 | /* |
2166 | * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary | |
2167 | * by the physical CPU on which the vcpu currently resides. | |
2168 | */ | |
d86cde6e | 2169 | static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
7af0c253 AO |
2170 | { |
2171 | u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); | |
2172 | u64 clidr; | |
2173 | u8 loc; | |
2174 | ||
2175 | if ((ctr_el0 & CTR_EL0_IDC)) { | |
2176 | /* | |
2177 | * Data cache clean to the PoU is not required so LoUU and LoUIS | |
2178 | * will not be set and a unified cache, which will be marked as | |
2179 | * LoC, will be added. | |
2180 | * | |
2181 | * If not DIC, make the unified cache L2 so that an instruction | |
2182 | * cache can be added as L1 later. | |
2183 | */ | |
2184 | loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2; | |
2185 | clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc); | |
2186 | } else { | |
2187 | /* | |
2188 | * Data cache clean to the PoU is required so let L1 have a data | |
2189 | * cache and mark it as LoUU and LoUIS. As L1 has a data cache, | |
2190 | * it can be marked as LoC too. | |
2191 | */ | |
2192 | loc = 1; | |
2193 | clidr = 1 << CLIDR_LOUU_SHIFT; | |
2194 | clidr |= 1 << CLIDR_LOUIS_SHIFT; | |
2195 | clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1); | |
2196 | } | |
2197 | ||
2198 | /* | |
2199 | * Instruction cache invalidation to the PoU is required so let L1 have | |
2200 | * an instruction cache. If L1 already has a data cache, it will be | |
2201 | * CACHE_TYPE_SEPARATE. | |
2202 | */ | |
2203 | if (!(ctr_el0 & CTR_EL0_DIC)) | |
2204 | clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1); | |
2205 | ||
2206 | clidr |= loc << CLIDR_LOC_SHIFT; | |
2207 | ||
2208 | /* | |
2209 | * Add a tag cache unified with the data cache. Allocation tags and data are | |
2210 | * unified in a cache line so that it looks valid even if there is only | |
2211 | * one cache line. | |
2212 | */ | |
2213 | if (kvm_has_mte(vcpu->kvm)) | |
c6c167af | 2214 | clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc); |
7af0c253 | 2215 | |
6678791e | 2216 | __vcpu_assign_sys_reg(vcpu, r->reg, clidr); |
d86cde6e JZ |
2217 | |
2218 | return __vcpu_sys_reg(vcpu, r->reg); | |
7af0c253 AO |
2219 | } |
2220 | ||
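/*
 * Worked example (illustrative): on a host with CTR_EL0.IDC=1 and DIC=0,
 * the fabrication above picks loc = 2 and builds
 *
 *	clidr = (CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(2)) |
 *		(CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1)) |
 *		(2 << CLIDR_LOC_SHIFT);
 *
 * i.e. an L1 instruction cache plus a unified L2 marked as the LoC.
 */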
2221 | static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | |
2222 | u64 val) | |
2223 | { | |
2224 | u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0); | |
2225 | u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val)); | |
2226 | ||
2227 | if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc)) | |
2228 | return -EINVAL; | |
2229 | ||
6678791e | 2230 | __vcpu_assign_sys_reg(vcpu, rd->reg, val); |
7af0c253 AO |
2231 | |
2232 | return 0; | |
2233 | } | |
2234 | ||
f7f2b15c AB |
2235 | static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
2236 | const struct sys_reg_desc *r) | |
2237 | { | |
7c582bf4 JM |
2238 | int reg = r->reg; |
2239 | ||
f7f2b15c | 2240 | if (p->is_write) |
7c582bf4 | 2241 | vcpu_write_sys_reg(vcpu, p->regval, reg); |
f7f2b15c | 2242 | else |
7c582bf4 | 2243 | p->regval = vcpu_read_sys_reg(vcpu, reg); |
f7f2b15c AB |
2244 | return true; |
2245 | } | |
2246 | ||
2247 | static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |
2248 | const struct sys_reg_desc *r) | |
2249 | { | |
2250 | u32 csselr; | |
2251 | ||
2252 | if (p->is_write) | |
2253 | return write_to_read_only(vcpu, p, r); | |
2254 | ||
2255 | csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1); | |
7af0c253 AO |
2256 | csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD; |
2257 | if (csselr < CSSELR_MAX) | |
2258 | p->regval = get_ccsidr(vcpu, csselr); | |
793acf87 | 2259 | |
f7f2b15c AB |
2260 | return true; |
2261 | } | |
2262 | ||
e1f358b5 SP |
2263 | static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, |
2264 | const struct sys_reg_desc *rd) | |
2265 | { | |
673638f4 SP |
2266 | if (kvm_has_mte(vcpu->kvm)) |
2267 | return 0; | |
2268 | ||
e1f358b5 SP |
2269 | return REG_HIDDEN; |
2270 | } | |
2271 | ||
2272 | #define MTE_REG(name) { \ | |
2273 | SYS_DESC(SYS_##name), \ | |
2274 | .access = undef_access, \ | |
2275 | .reset = reset_unknown, \ | |
2276 | .reg = name, \ | |
2277 | .visibility = mte_visibility, \ | |
2278 | } | |
2279 | ||
6ff9dc23 JL |
2280 | static unsigned int el2_visibility(const struct kvm_vcpu *vcpu, |
2281 | const struct sys_reg_desc *rd) | |
2282 | { | |
2283 | if (vcpu_has_nv(vcpu)) | |
2284 | return 0; | |
2285 | ||
2286 | return REG_HIDDEN; | |
2287 | } | |
2288 | ||
9b9cce60 MZ |
2289 | static bool bad_vncr_trap(struct kvm_vcpu *vcpu, |
2290 | struct sys_reg_params *p, | |
2291 | const struct sys_reg_desc *r) | |
2292 | { | |
2293 | /* | |
2294 | * We really shouldn't be here, and this is likely the result | |
2295 | * of a misconfigured trap, as this register should target the | |
2296 | * VNCR page, and nothing else. | |
2297 | */ | |
2298 | return bad_trap(vcpu, p, r, | |
2299 | "trap of VNCR-backed register"); | |
2300 | } | |
2301 | ||
2302 | static bool bad_redir_trap(struct kvm_vcpu *vcpu, | |
2303 | struct sys_reg_params *p, | |
2304 | const struct sys_reg_desc *r) | |
2305 | { | |
2306 | /* | |
2307 | * We really shouldn't be here, and this is likely the result | |
2308 | * of a misconfigured trap, as this register should target the | |
2309 | * corresponding EL1, and nothing else. | |
2310 | */ | |
2311 | return bad_trap(vcpu, p, r, | |
2312 | "trap of EL2 register redirected to EL1"); | |
2313 | } | |
2314 | ||
997eeeca MB |
2315 | #define EL2_REG_FILTERED(name, acc, rst, v, filter) { \ |
2316 | SYS_DESC(SYS_##name), \ | |
2317 | .access = acc, \ | |
2318 | .reset = rst, \ | |
2319 | .reg = name, \ | |
2320 | .visibility = filter, \ | |
2321 | .val = v, \ | |
2322 | } | |
2323 | ||
6fb75733 MZ |
2324 | #define EL2_REG(name, acc, rst, v) \ |
2325 | EL2_REG_FILTERED(name, acc, rst, v, el2_visibility) | |
2326 | ||
9b9cce60 MZ |
2327 | #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v) |
2328 | #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v) | |
2329 | ||
d86cde6e JZ |
2330 | /* |
2331 | * Since the generic reset() callback and the val field are not otherwise | |
2332 | * used for idregs, they are repurposed as follows. | |
2333 | * The reset() callback returns the KVM sanitised register value, which is | |
2334 | * the same as the host kernel sanitised value if there is no KVM sanitisation. | |
2335 | * The val field is used as a mask indicating the writable fields of the idreg: | |
2336 | * only bits set to 1 are writable from userspace. This mask might not be | |
2337 | * necessary in the future once all ID registers are enabled as writable | |
2338 | * from userspace. | |
2339 | */ | |
2340 | ||
57e7de26 MZ |
2341 | #define ID_DESC_DEFAULT_CALLBACKS \ |
2342 | .access = access_id_reg, \ | |
2343 | .get_user = get_id_reg, \ | |
2344 | .set_user = set_id_reg, \ | |
2345 | .visibility = id_visibility, \ | |
2346 | .reset = kvm_read_sanitised_id_reg | |
2347 | ||
56d77aa8 | 2348 | #define ID_DESC(name) \ |
93390c0a | 2349 | SYS_DESC(SYS_##name), \ |
57e7de26 | 2350 | ID_DESC_DEFAULT_CALLBACKS |
56d77aa8 OU |
2351 | |
2352 | /* sys_reg_desc initialiser for known cpufeature ID registers */ | |
2353 | #define ID_SANITISED(name) { \ | |
2354 | ID_DESC(name), \ | |
d86cde6e | 2355 | .val = 0, \ |
93390c0a DM |
2356 | } |
2357 | ||
d5efec7e OU |
2358 | /* sys_reg_desc initialiser for known cpufeature ID registers */ |
2359 | #define AA32_ID_SANITISED(name) { \ | |
56d77aa8 | 2360 | ID_DESC(name), \ |
d5efec7e | 2361 | .visibility = aa32_id_visibility, \ |
d86cde6e | 2362 | .val = 0, \ |
d5efec7e OU |
2363 | } |
2364 | ||
56d77aa8 OU |
2365 | /* sys_reg_desc initialiser for writable ID registers */ |
2366 | #define ID_WRITABLE(name, mask) { \ | |
2367 | ID_DESC(name), \ | |
56d77aa8 OU |
2368 | .val = mask, \ |
2369 | } | |
2370 | ||
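/*
 * Usage sketch (hypothetical register and field names, for illustration
 * only): an ID register whose FOO field alone should be writable from
 * userspace would be declared as
 *
 *	ID_WRITABLE(ID_AA64XYZ0_EL1, ID_AA64XYZ0_EL1_FOO_MASK),
 *
 * pinning every other field to the KVM sanitised limit.
 */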
7da540e2 JM |
2371 | /* sys_reg_desc initialiser for cpufeature ID registers that need filtering */ |
2372 | #define ID_FILTERED(sysreg, name, mask) { \ | |
2373 | ID_DESC(sysreg), \ | |
2374 | .set_user = set_##name, \ | |
7da540e2 JM |
2375 | .val = (mask), \ |
2376 | } | |
2377 | ||
93390c0a DM |
2378 | /* |
2379 | * sys_reg_desc initialiser for architecturally unallocated cpufeature ID | |
2380 | * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 | |
2381 | * (1 <= crm < 8, 0 <= Op2 < 8). | |
2382 | */ | |
2383 | #define ID_UNALLOCATED(crm, op2) { \ | |
57e7de26 | 2384 | .name = "S3_0_0_" #crm "_" #op2, \ |
93390c0a | 2385 | Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ |
57e7de26 | 2386 | ID_DESC_DEFAULT_CALLBACKS, \ |
d86cde6e | 2387 | .visibility = raz_visibility, \ |
d86cde6e | 2388 | .val = 0, \ |
93390c0a DM |
2389 | } |
2390 | ||
2391 | /* | |
2392 | * sys_reg_desc initialiser for known ID registers that we hide from guests. | |
2393 | * For now, these are exposed just like unallocated ID regs: they appear | |
2394 | * RAZ for the guest. | |
2395 | */ | |
2396 | #define ID_HIDDEN(name) { \ | |
56d77aa8 | 2397 | ID_DESC(name), \ |
34b4d203 | 2398 | .visibility = raz_visibility, \ |
d86cde6e | 2399 | .val = 0, \ |
93390c0a DM |
2400 | } |
2401 | ||
6ff9dc23 JL |
2402 | static bool access_sp_el1(struct kvm_vcpu *vcpu, |
2403 | struct sys_reg_params *p, | |
2404 | const struct sys_reg_desc *r) | |
2405 | { | |
2406 | if (p->is_write) | |
6678791e | 2407 | __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval); |
6ff9dc23 JL |
2408 | else |
2409 | p->regval = __vcpu_sys_reg(vcpu, SP_EL1); | |
2410 | ||
2411 | return true; | |
2412 | } | |
2413 | ||
9da117ee JL |
2414 | static bool access_elr(struct kvm_vcpu *vcpu, |
2415 | struct sys_reg_params *p, | |
2416 | const struct sys_reg_desc *r) | |
2417 | { | |
2418 | if (p->is_write) | |
2419 | vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1); | |
2420 | else | |
2421 | p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1); | |
2422 | ||
2423 | return true; | |
2424 | } | |
2425 | ||
2426 | static bool access_spsr(struct kvm_vcpu *vcpu, | |
2427 | struct sys_reg_params *p, | |
2428 | const struct sys_reg_desc *r) | |
2429 | { | |
2430 | if (p->is_write) | |
6678791e | 2431 | __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval); |
9da117ee JL |
2432 | else |
2433 | p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1); | |
2434 | ||
2435 | return true; | |
2436 | } | |
2437 | ||
989fce63 MZ |
2438 | static bool access_cntkctl_el12(struct kvm_vcpu *vcpu, |
2439 | struct sys_reg_params *p, | |
2440 | const struct sys_reg_desc *r) | |
2441 | { | |
2442 | if (p->is_write) | |
6678791e | 2443 | __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval); |
989fce63 MZ |
2444 | else |
2445 | p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1); | |
2446 | ||
2447 | return true; | |
2448 | } | |
2449 | ||
94f29ab2 MZ |
2450 | static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
2451 | { | |
2452 | u64 val = r->val; | |
2453 | ||
2454 | if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1)) | |
2455 | val |= HCR_E2H; | |
2456 | ||
6678791e MZ |
2457 | __vcpu_assign_sys_reg(vcpu, r->reg, val); |
2458 | ||
2459 | return __vcpu_sys_reg(vcpu, r->reg); | |
94f29ab2 MZ |
2460 | } |
2461 | ||
ee3a9a06 MZ |
2462 | static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu, |
2463 | const struct sys_reg_desc *rd, | |
2464 | unsigned int (*fn)(const struct kvm_vcpu *, | |
2465 | const struct sys_reg_desc *)) | |
2466 | { | |
2467 | return el2_visibility(vcpu, rd) ?: fn(vcpu, rd); | |
2468 | } | |
2469 | ||
b3d29a82 OU |
2470 | static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu, |
2471 | const struct sys_reg_desc *rd) | |
2472 | { | |
ee3a9a06 | 2473 | return __el2_visibility(vcpu, rd, sve_visibility); |
b3d29a82 OU |
2474 | } |
2475 | ||
6fb75733 MZ |
2476 | static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu, |
2477 | const struct sys_reg_desc *rd) | |
2478 | { | |
2479 | if (el2_visibility(vcpu, rd) == 0 && | |
2480 | kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)) | |
2481 | return 0; | |
2482 | ||
2483 | return REG_HIDDEN; | |
2484 | } | |
2485 | ||
b3d29a82 OU |
2486 | static bool access_zcr_el2(struct kvm_vcpu *vcpu, |
2487 | struct sys_reg_params *p, | |
2488 | const struct sys_reg_desc *r) | |
2489 | { | |
2490 | unsigned int vq; | |
2491 | ||
2492 | if (guest_hyp_sve_traps_enabled(vcpu)) { | |
2493 | kvm_inject_nested_sve_trap(vcpu); | |
2494 | return true; | |
2495 | } | |
2496 | ||
2497 | if (!p->is_write) { | |
2498 | p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2); | |
2499 | return true; | |
2500 | } | |
2501 | ||
2502 | vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1; | |
2503 | vq = min(vq, vcpu_sve_max_vq(vcpu)); | |
2504 | vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2); | |
96c2f033 MZ |
2505 | |
2506 | return true; | |
2507 | } | |
2508 | ||
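/*
 * Worked example (illustrative): if the guest writes ZCR_EL2.LEN = 15,
 * asking for a 2048-bit vector length, but vcpu_sve_max_vq() is 4
 * (512 bits), the clamp above stores LEN = 3, i.e. vq = 4.
 */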
2509 | static bool access_gic_vtr(struct kvm_vcpu *vcpu, | |
2510 | struct sys_reg_params *p, | |
2511 | const struct sys_reg_desc *r) | |
2512 | { | |
2513 | if (p->is_write) | |
2514 | return write_to_read_only(vcpu, p, r); | |
2515 | ||
2516 | p->regval = kvm_vgic_global_state.ich_vtr_el2; | |
2517 | p->regval &= ~(ICH_VTR_EL2_DVIM | | |
2518 | ICH_VTR_EL2_A3V | | |
2519 | ICH_VTR_EL2_IDbits); | |
2520 | p->regval |= ICH_VTR_EL2_nV4; | |
2521 | ||
2522 | return true; | |
2523 | } | |
2524 | ||
2525 | static bool access_gic_misr(struct kvm_vcpu *vcpu, | |
2526 | struct sys_reg_params *p, | |
2527 | const struct sys_reg_desc *r) | |
2528 | { | |
2529 | if (p->is_write) | |
2530 | return write_to_read_only(vcpu, p, r); | |
2531 | ||
2532 | p->regval = vgic_v3_get_misr(vcpu); | |
2533 | ||
2534 | return true; | |
2535 | } | |
2536 | ||
2537 | static bool access_gic_eisr(struct kvm_vcpu *vcpu, | |
2538 | struct sys_reg_params *p, | |
2539 | const struct sys_reg_desc *r) | |
2540 | { | |
2541 | if (p->is_write) | |
2542 | return write_to_read_only(vcpu, p, r); | |
2543 | ||
2544 | p->regval = vgic_v3_get_eisr(vcpu); | |
2545 | ||
2546 | return true; | |
2547 | } | |
2548 | ||
2549 | static bool access_gic_elrsr(struct kvm_vcpu *vcpu, | |
2550 | struct sys_reg_params *p, | |
2551 | const struct sys_reg_desc *r) | |
2552 | { | |
2553 | if (p->is_write) | |
2554 | return write_to_read_only(vcpu, p, r); | |
2555 | ||
2556 | p->regval = vgic_v3_get_elrsr(vcpu); | |
2557 | ||
b3d29a82 OU |
2558 | return true; |
2559 | } | |
2560 | ||
b86c9bea JG |
2561 | static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu, |
2562 | const struct sys_reg_desc *rd) | |
2563 | { | |
26e89dcc | 2564 | if (kvm_has_s1poe(vcpu->kvm)) |
b86c9bea JG |
2565 | return 0; |
2566 | ||
2567 | return REG_HIDDEN; | |
2568 | } | |
2569 | ||
5970e990 MZ |
2570 | static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu, |
2571 | const struct sys_reg_desc *rd) | |
2572 | { | |
2573 | return __el2_visibility(vcpu, rd, s1poe_visibility); | |
2574 | } | |
2575 | ||
0fcb4eea MB |
2576 | static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu, |
2577 | const struct sys_reg_desc *rd) | |
2578 | { | |
2579 | if (kvm_has_tcr2(vcpu->kvm)) | |
2580 | return 0; | |
2581 | ||
2582 | return REG_HIDDEN; | |
2583 | } | |
2584 | ||
2585 | static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu, | |
2586 | const struct sys_reg_desc *rd) | |
2587 | { | |
2588 | return __el2_visibility(vcpu, rd, tcr2_visibility); | |
2589 | } | |
2590 | ||
a68cddbe MB |
2591 | static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu, |
2592 | const struct sys_reg_desc *rd) | |
2593 | { | |
2594 | if (kvm_has_s1pie(vcpu->kvm)) | |
2595 | return 0; | |
2596 | ||
2597 | return REG_HIDDEN; | |
2598 | } | |
2599 | ||
2600 | static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu, | |
2601 | const struct sys_reg_desc *rd) | |
2602 | { | |
2603 | return __el2_visibility(vcpu, rd, s1pie_visibility); | |
2604 | } | |
2605 | ||
d3ba35b6 OU |
2606 | static bool access_mdcr(struct kvm_vcpu *vcpu, |
2607 | struct sys_reg_params *p, | |
2608 | const struct sys_reg_desc *r) | |
2609 | { | |
efff9dd2 | 2610 | u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2); |
d3ba35b6 | 2611 | |
efff9dd2 MZ |
2612 | if (!p->is_write) { |
2613 | p->regval = old; | |
2614 | return true; | |
2615 | } | |
2616 | ||
2617 | val = p->regval; | |
2618 | hpmn = FIELD_GET(MDCR_EL2_HPMN, val); | |
2619 | ||
2620 | /* | |
2621 | * If HPMN is out of bounds, limit it to what we actually | |
2622 | * support. This matches the UNKNOWN definition of the field | |
2623 | * in that case, and keeps the emulation simple. Sort of. | |
2624 | */ | |
2625 | if (hpmn > vcpu->kvm->arch.nr_pmu_counters) { | |
2626 | hpmn = vcpu->kvm->arch.nr_pmu_counters; | |
2627 | u64_replace_bits(val, hpmn, MDCR_EL2_HPMN); | |
2628 | } | |
2629 | ||
6678791e | 2630 | __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val); |
d3ba35b6 OU |
2631 | |
2632 | /* | |
efff9dd2 MZ |
2633 | * Request a reload of the PMU to enable/disable the counters |
2634 | * affected by HPME. | |
d3ba35b6 | 2635 | */ |
efff9dd2 | 2636 | if ((old ^ val) & MDCR_EL2_HPME) |
d3ba35b6 OU |
2637 | kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); |
2638 | ||
2639 | return true; | |
2640 | } | |
2641 | ||
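/*
 * Worked example (illustrative): with vcpu->kvm->arch.nr_pmu_counters == 4,
 * a guest write setting MDCR_EL2.HPMN to 7 is clamped so that HPMN reads
 * back as 4, the architecturally UNKNOWN-but-sane result for an
 * out-of-range value.
 */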
4cd48565 OU |
2642 | /* |
2643 | * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and | |
2644 | * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them. | |
2645 | * The values made visible to userspace were the register values of the boot | |
2646 | * CPU. | |
2647 | * | |
2648 | * At the same time, reads from these registers at EL1 previously were not | |
2649 | * trapped, allowing the guest to read the actual hardware value. On big-little | |
2650 | * machines, this means the VM can see different values depending on where a | |
2651 | * given vCPU got scheduled. | |
2652 | * | |
2653 | * These registers are now trapped as collateral damage from SME, and what | |
2654 | * follows attempts to give a user / guest view consistent with the existing | |
2655 | * ABI. | |
2656 | */ | |
2657 | static bool access_imp_id_reg(struct kvm_vcpu *vcpu, | |
2658 | struct sys_reg_params *p, | |
2659 | const struct sys_reg_desc *r) | |
2660 | { | |
2661 | if (p->is_write) | |
2662 | return write_to_read_only(vcpu, p, r); | |
2663 | ||
3adaee78 SO |
2664 | /* |
2665 | * Return the VM-scoped implementation ID register values if userspace | |
2666 | * has made them writable. | |
2667 | */ | |
2668 | if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags)) | |
2669 | return access_id_reg(vcpu, p, r); | |
2670 | ||
2671 | /* | |
2672 | * Otherwise, fall back to the old behavior of returning the value of | |
2673 | * the current CPU. | |
2674 | */ | |
4cd48565 OU |
2675 | switch (reg_to_encoding(r)) { |
2676 | case SYS_REVIDR_EL1: | |
2677 | p->regval = read_sysreg(revidr_el1); | |
2678 | break; | |
2679 | case SYS_AIDR_EL1: | |
2680 | p->regval = read_sysreg(aidr_el1); | |
2681 | break; | |
2682 | default: | |
2683 | WARN_ON_ONCE(1); | |
2684 | } | |
2685 | ||
2686 | return true; | |
2687 | } | |
2688 | ||
2689 | static u64 __ro_after_init boot_cpu_midr_val; | |
2690 | static u64 __ro_after_init boot_cpu_revidr_val; | |
2691 | static u64 __ro_after_init boot_cpu_aidr_val; | |
2692 | ||
2693 | static void init_imp_id_regs(void) | |
2694 | { | |
2695 | boot_cpu_midr_val = read_sysreg(midr_el1); | |
2696 | boot_cpu_revidr_val = read_sysreg(revidr_el1); | |
2697 | boot_cpu_aidr_val = read_sysreg(aidr_el1); | |
2698 | } | |
2699 | ||
b4043e7c | 2700 | static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
4cd48565 OU |
2701 | { |
2702 | switch (reg_to_encoding(r)) { | |
2703 | case SYS_MIDR_EL1: | |
b4043e7c | 2704 | return boot_cpu_midr_val; |
4cd48565 | 2705 | case SYS_REVIDR_EL1: |
b4043e7c | 2706 | return boot_cpu_revidr_val; |
4cd48565 | 2707 | case SYS_AIDR_EL1: |
b4043e7c | 2708 | return boot_cpu_aidr_val; |
4cd48565 | 2709 | default: |
b4043e7c SO |
2710 | KVM_BUG_ON(1, vcpu->kvm); |
2711 | return 0; | |
4cd48565 | 2712 | } |
4cd48565 OU |
2713 | } |
2714 | ||
2715 | static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, | |
2716 | u64 val) | |
2717 | { | |
3adaee78 | 2718 | struct kvm *kvm = vcpu->kvm; |
4cd48565 | 2719 | u64 expected; |
4cd48565 | 2720 | |
3adaee78 SO |
2721 | guard(mutex)(&kvm->arch.config_lock); |
2722 | ||
b4043e7c | 2723 | expected = read_id_reg(vcpu, r); |
3adaee78 SO |
2724 | if (expected == val) |
2725 | return 0; | |
2726 | ||
2727 | if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)) | |
2728 | return -EINVAL; | |
4cd48565 | 2729 | |
3adaee78 SO |
2730 | /* |
2731 | * Once the VM has started the ID registers are immutable. Reject the | |
2732 | * write if userspace tries to change it. | |
2733 | */ | |
2734 | if (kvm_vm_has_ran_once(kvm)) | |
2735 | return -EBUSY; | |
2736 | ||
2737 | /* | |
2738 | * Any value is allowed for the implementation ID registers so long as | |
2739 | * it is within the writable mask. | |
2740 | */ | |
2741 | if ((val & r->val) != val) | |
2742 | return -EINVAL; | |
2743 | ||
2744 | kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val); | |
2745 | return 0; | |
4cd48565 OU |
2746 | } |
2747 | ||
3adaee78 | 2748 | #define IMPLEMENTATION_ID(reg, mask) { \ |
4cd48565 OU |
2749 | SYS_DESC(SYS_##reg), \ |
2750 | .access = access_imp_id_reg, \ | |
b4043e7c | 2751 | .get_user = get_id_reg, \ |
4cd48565 | 2752 | .set_user = set_imp_id_reg, \ |
b4043e7c | 2753 | .reset = reset_imp_id_reg, \ |
3adaee78 | 2754 | .val = mask, \ |
c8823e51 MZ |
2755 | } |
2756 | ||
2757 | static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | |
2758 | { | |
6678791e | 2759 | __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters); |
c8823e51 | 2760 | return vcpu->kvm->arch.nr_pmu_counters; |
4cd48565 | 2761 | } |
d3ba35b6 | 2762 | |
7c8c5e6a MZ |
2763 | /* |
2764 | * Architected system registers. | |
2765 | * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 | |
7609c125 | 2766 | * |
0c557ed4 MZ |
2767 | * Debug handling: We do trap most, if not all debug related system |
2768 | * registers. The implementation is good enough to ensure that a guest | |
2769 | * can use these with minimal performance degradation. The drawback is | |
7dabf02f OU |
2770 | * that we don't implement any of the external debug architecture. |
2771 | * This should be revisited if we ever encounter a more demanding | |
2772 | * guest... | |
7c8c5e6a MZ |
2773 | */ |
2774 | static const struct sys_reg_desc sys_reg_descs[] = { | |
0c557ed4 MZ |
2775 | DBG_BCR_BVR_WCR_WVR_EL1(0), |
2776 | DBG_BCR_BVR_WCR_WVR_EL1(1), | |
ee1b64e6 MR |
2777 | { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, |
2778 | { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, | |
0c557ed4 MZ |
2779 | DBG_BCR_BVR_WCR_WVR_EL1(2), |
2780 | DBG_BCR_BVR_WCR_WVR_EL1(3), | |
2781 | DBG_BCR_BVR_WCR_WVR_EL1(4), | |
2782 | DBG_BCR_BVR_WCR_WVR_EL1(5), | |
2783 | DBG_BCR_BVR_WCR_WVR_EL1(6), | |
2784 | DBG_BCR_BVR_WCR_WVR_EL1(7), | |
2785 | DBG_BCR_BVR_WCR_WVR_EL1(8), | |
2786 | DBG_BCR_BVR_WCR_WVR_EL1(9), | |
2787 | DBG_BCR_BVR_WCR_WVR_EL1(10), | |
2788 | DBG_BCR_BVR_WCR_WVR_EL1(11), | |
2789 | DBG_BCR_BVR_WCR_WVR_EL1(12), | |
2790 | DBG_BCR_BVR_WCR_WVR_EL1(13), | |
2791 | DBG_BCR_BVR_WCR_WVR_EL1(14), | |
2792 | DBG_BCR_BVR_WCR_WVR_EL1(15), | |
2793 | ||
ee1b64e6 | 2794 | { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, |
f24adc65 | 2795 | { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 }, |
d42e2671 | 2796 | { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1, |
187de7c2 | 2797 | OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, |
ee1b64e6 MR |
2798 | { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, |
2799 | { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, | |
2800 | { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi }, | |
2801 | { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi }, | |
2802 | { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 }, | |
2803 | ||
2804 | { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi }, | |
2805 | { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi }, | |
2806 | // DBGDTR[TR]X_EL0 share the same encoding | |
2807 | { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, | |
2808 | ||
cd08d321 | 2809 | { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 }, |
62a89c44 | 2810 | |
3adaee78 | 2811 | IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)), |
851050a5 | 2812 | { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, |
3adaee78 | 2813 | IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)), |
93390c0a DM |
2814 | |
2815 | /* | |
2816 | * ID regs: all ID_SANITISED() entries here must have corresponding | |
2817 | * entries in arm64_ftr_regs[]. | |
2818 | */ | |
2819 | ||
2820 | /* AArch64 mappings of the AArch32 ID registers */ | |
2821 | /* CRm=1 */ | |
d5efec7e OU |
2822 | AA32_ID_SANITISED(ID_PFR0_EL1), |
2823 | AA32_ID_SANITISED(ID_PFR1_EL1), | |
c118cead JZ |
2824 | { SYS_DESC(SYS_ID_DFR0_EL1), |
2825 | .access = access_id_reg, | |
2826 | .get_user = get_id_reg, | |
2827 | .set_user = set_id_dfr0_el1, | |
2828 | .visibility = aa32_id_visibility, | |
2829 | .reset = read_sanitised_id_dfr0_el1, | |
9f9917bc OU |
2830 | .val = ID_DFR0_EL1_PerfMon_MASK | |
2831 | ID_DFR0_EL1_CopDbg_MASK, }, | |
93390c0a | 2832 | ID_HIDDEN(ID_AFR0_EL1), |
d5efec7e OU |
2833 | AA32_ID_SANITISED(ID_MMFR0_EL1), |
2834 | AA32_ID_SANITISED(ID_MMFR1_EL1), | |
2835 | AA32_ID_SANITISED(ID_MMFR2_EL1), | |
2836 | AA32_ID_SANITISED(ID_MMFR3_EL1), | |
93390c0a DM |
2837 | |
2838 | /* CRm=2 */ | |
d5efec7e OU |
2839 | AA32_ID_SANITISED(ID_ISAR0_EL1), |
2840 | AA32_ID_SANITISED(ID_ISAR1_EL1), | |
2841 | AA32_ID_SANITISED(ID_ISAR2_EL1), | |
2842 | AA32_ID_SANITISED(ID_ISAR3_EL1), | |
2843 | AA32_ID_SANITISED(ID_ISAR4_EL1), | |
2844 | AA32_ID_SANITISED(ID_ISAR5_EL1), | |
2845 | AA32_ID_SANITISED(ID_MMFR4_EL1), | |
2846 | AA32_ID_SANITISED(ID_ISAR6_EL1), | |
93390c0a DM |
2847 | |
2848 | /* CRm=3 */ | |
d5efec7e OU |
2849 | AA32_ID_SANITISED(MVFR0_EL1), |
2850 | AA32_ID_SANITISED(MVFR1_EL1), | |
2851 | AA32_ID_SANITISED(MVFR2_EL1), | |
93390c0a | 2852 | ID_UNALLOCATED(3,3), |
d5efec7e | 2853 | AA32_ID_SANITISED(ID_PFR2_EL1), |
dd35ec07 | 2854 | ID_HIDDEN(ID_DFR1_EL1), |
d5efec7e | 2855 | AA32_ID_SANITISED(ID_MMFR5_EL1), |
93390c0a DM |
2856 | ID_UNALLOCATED(3,7), |
2857 | ||
2858 | /* AArch64 ID registers */ | |
2859 | /* CRm=4 */ | |
7da540e2 JM |
2860 | ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1, |
2861 | ~(ID_AA64PFR0_EL1_AMU | | |
2862 | ID_AA64PFR0_EL1_MPAM | | |
2863 | ID_AA64PFR0_EL1_SVE | | |
2864 | ID_AA64PFR0_EL1_RAS | | |
2865 | ID_AA64PFR0_EL1_AdvSIMD | | |
2866 | ID_AA64PFR0_EL1_FP)), | |
6685f5d5 JM |
2867 | ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1, |
2868 | ~(ID_AA64PFR1_EL1_PFAR | | |
78c4446b SH |
2869 | ID_AA64PFR1_EL1_DF2 | |
2870 | ID_AA64PFR1_EL1_MTEX | | |
2871 | ID_AA64PFR1_EL1_THE | | |
2872 | ID_AA64PFR1_EL1_GCS | | |
2873 | ID_AA64PFR1_EL1_MTE_frac | | |
2874 | ID_AA64PFR1_EL1_NMI | | |
2875 | ID_AA64PFR1_EL1_RNDR_trap | | |
2876 | ID_AA64PFR1_EL1_SME | | |
2877 | ID_AA64PFR1_EL1_RES0 | | |
2878 | ID_AA64PFR1_EL1_MPAM_frac | | |
2879 | ID_AA64PFR1_EL1_RAS_frac | | |
2880 | ID_AA64PFR1_EL1_MTE)), | |
13c7a51e | 2881 | ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR), |
93390c0a | 2882 | ID_UNALLOCATED(4,3), |
f89fbb35 | 2883 | ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), |
90807748 | 2884 | ID_HIDDEN(ID_AA64SMFR0_EL1), |
93390c0a | 2885 | ID_UNALLOCATED(4,6), |
6d730765 | 2886 | ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0), |
93390c0a DM |
2887 | |
2888 | /* CRm=5 */ | |
980c41f5 SK |
2889 | /* |
2890 | * Prior to FEAT_Debugv8.9, the architecture defines context-aware | |
2891 | * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs). | |
2892 | * KVM does not trap + emulate the breakpoint registers, and as such | |
2893 | * cannot support a layout that misaligns with the underlying hardware. | |
2894 | * While it may be possible to describe a subset that aligns with | |
2895 | * hardware, just prevent changes to BRPs and CTX_CMPs altogether for | |
2896 | * simplicity. | |
2897 | * | |
2898 | * See DDI0487K.a, section D2.8.3 Breakpoint types and linking | |
2899 | * of breakpoints for more details. | |
2900 | */ | |
7da540e2 JM |
2901 | ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1, |
2902 | ID_AA64DFR0_EL1_DoubleLock_MASK | | |
2903 | ID_AA64DFR0_EL1_WRPs_MASK | | |
2904 | ID_AA64DFR0_EL1_PMUVer_MASK | | |
2905 | ID_AA64DFR0_EL1_DebugVer_MASK), | |
93390c0a DM |
2906 | ID_SANITISED(ID_AA64DFR1_EL1), |
2907 | ID_UNALLOCATED(5,2), | |
2908 | ID_UNALLOCATED(5,3), | |
2909 | ID_HIDDEN(ID_AA64AFR0_EL1), | |
2910 | ID_HIDDEN(ID_AA64AFR1_EL1), | |
2911 | ID_UNALLOCATED(5,6), | |
2912 | ID_UNALLOCATED(5,7), | |
2913 | ||
2914 | /* CRm=6 */ | |
56d77aa8 OU |
2915 | ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0), |
2916 | ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI | | |
2917 | ID_AA64ISAR1_EL1_GPA | | |
2918 | ID_AA64ISAR1_EL1_API | | |
2919 | ID_AA64ISAR1_EL1_APA)), | |
2920 | ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 | | |
56d77aa8 OU |
2921 | ID_AA64ISAR2_EL1_APA3 | |
2922 | ID_AA64ISAR2_EL1_GPA3)), | |
fd22af17 MB |
2923 | ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT | |
2924 | ID_AA64ISAR3_EL1_FAMINMAX)), | |
93390c0a DM |
2925 | ID_UNALLOCATED(6,4), |
2926 | ID_UNALLOCATED(6,5), | |
2927 | ID_UNALLOCATED(6,6), | |
2928 | ID_UNALLOCATED(6,7), | |
2929 | ||
2930 | /* CRm=7 */ | |
3f1e0727 SO |
2931 | ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1, |
2932 | ~(ID_AA64MMFR0_EL1_RES0 | | |
03c7527e | 2933 | ID_AA64MMFR0_EL1_ASIDBITS)), |
d5a32b60 JZ |
2934 | ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 | |
2935 | ID_AA64MMFR1_EL1_HCX | | |
d5a32b60 JZ |
2936 | ID_AA64MMFR1_EL1_TWED | |
2937 | ID_AA64MMFR1_EL1_XNX | | |
2938 | ID_AA64MMFR1_EL1_VH | | |
2939 | ID_AA64MMFR1_EL1_VMIDBits)), | |
9d674557 MZ |
2940 | ID_FILTERED(ID_AA64MMFR2_EL1, |
2941 | id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 | | |
d5a32b60 JZ |
2942 | ID_AA64MMFR2_EL1_EVT | |
2943 | ID_AA64MMFR2_EL1_FWB | | |
2944 | ID_AA64MMFR2_EL1_IDS | | |
2945 | ID_AA64MMFR2_EL1_NV | | |
2946 | ID_AA64MMFR2_EL1_CCIDX)), | |
70ed7238 | 2947 | ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX | |
d4a89e5a | 2948 | ID_AA64MMFR3_EL1_S1PIE | |
70ed7238 | 2949 | ID_AA64MMFR3_EL1_S1POE)), |
642c23ea | 2950 | ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac), |
93390c0a DM |
2951 | ID_UNALLOCATED(7,5), |
2952 | ID_UNALLOCATED(7,6), | |
2953 | ID_UNALLOCATED(7,7), | |
2954 | ||
851050a5 | 2955 | { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
af473829 | 2956 | { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, |
851050a5 | 2957 | { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, |
2ac638fc | 2958 | |
e1f358b5 SP |
2959 | MTE_REG(RGSR_EL1), |
2960 | MTE_REG(GCR_EL1), | |
2ac638fc | 2961 | |
73433762 | 2962 | { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, |
cc427cbb | 2963 | { SYS_DESC(SYS_TRFCR_EL1), undef_access }, |
90807748 MB |
2964 | { SYS_DESC(SYS_SMPRI_EL1), undef_access }, |
2965 | { SYS_DESC(SYS_SMCR_EL1), undef_access }, | |
851050a5 MR |
2966 | { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, |
2967 | { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, | |
2968 | { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, | |
0fcb4eea MB |
2969 | { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0, |
2970 | .visibility = tcr2_visibility }, | |
851050a5 | 2971 | |
384b40ca MR |
2972 | PTRAUTH_KEY(APIA), |
2973 | PTRAUTH_KEY(APIB), | |
2974 | PTRAUTH_KEY(APDA), | |
2975 | PTRAUTH_KEY(APDB), | |
2976 | PTRAUTH_KEY(APGA), | |
2977 | ||
9da117ee JL |
2978 | { SYS_DESC(SYS_SPSR_EL1), access_spsr }, | |
2979 | { SYS_DESC(SYS_ELR_EL1), access_elr }, | |
2980 | ||
4a999a1d MZ |
2981 | { SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, |
2982 | ||
851050a5 MR |
2983 | { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, |
2984 | { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, | |
2985 | { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, | |
558daf69 DG |
2986 | |
2987 | { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi }, | |
2988 | { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi }, | |
2989 | { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi }, | |
2990 | { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi }, | |
2991 | { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi }, | |
2992 | { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi }, | |
2993 | { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, | |
2994 | { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, | |
2995 | ||
e1f358b5 SP |
2996 | MTE_REG(TFSR_EL1), |
2997 | MTE_REG(TFSRE0_EL1), | |
2ac638fc | 2998 | |
851050a5 MR |
2999 | { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, |
3000 | { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, | |
7c8c5e6a | 3001 | |
13611bc8 AE |
3002 | { SYS_DESC(SYS_PMSCR_EL1), undef_access }, |
3003 | { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access }, | |
3004 | { SYS_DESC(SYS_PMSICR_EL1), undef_access }, | |
3005 | { SYS_DESC(SYS_PMSIRR_EL1), undef_access }, | |
3006 | { SYS_DESC(SYS_PMSFCR_EL1), undef_access }, | |
3007 | { SYS_DESC(SYS_PMSEVFR_EL1), undef_access }, | |
3008 | { SYS_DESC(SYS_PMSLATFR_EL1), undef_access }, | |
3009 | { SYS_DESC(SYS_PMSIDR_EL1), undef_access }, | |
3010 | { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access }, | |
3011 | { SYS_DESC(SYS_PMBPTR_EL1), undef_access }, | |
3012 | { SYS_DESC(SYS_PMBSR_EL1), undef_access }, | |
3013 | /* PMBIDR_EL1 is not trapped */ | |
3014 | ||
9d2a55b4 | 3015 | { PMU_SYS_REG(PMINTENSET_EL1), |
a45f41d7 RRA |
3016 | .access = access_pminten, .reg = PMINTENSET_EL1, |
3017 | .get_user = get_pmreg, .set_user = set_pmreg }, | |
9d2a55b4 | 3018 | { PMU_SYS_REG(PMINTENCLR_EL1), |
a45f41d7 RRA |
3019 | .access = access_pminten, .reg = PMINTENSET_EL1, |
3020 | .get_user = get_pmreg, .set_user = set_pmreg }, | |
46081078 | 3021 | { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, |
7c8c5e6a | 3022 | |
851050a5 | 3023 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, |
a68cddbe MB |
3024 | { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1, |
3025 | .visibility = s1pie_visibility }, | |
3026 | { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1, | |
3027 | .visibility = s1pie_visibility }, | |
b86c9bea JG |
3028 | { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1, |
3029 | .visibility = s1poe_visibility }, | |
851050a5 | 3030 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
7c8c5e6a | 3031 | |
22925521 MZ |
3032 | { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, |
3033 | { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, | |
3034 | { SYS_DESC(SYS_LORN_EL1), trap_loregion }, | |
3035 | { SYS_DESC(SYS_LORC_EL1), trap_loregion }, | |
31ff96c3 | 3036 | { SYS_DESC(SYS_MPAMIDR_EL1), undef_access }, |
22925521 | 3037 | { SYS_DESC(SYS_LORID_EL1), trap_loregion }, |
cc33c4e2 | 3038 | |
31ff96c3 JM |
3039 | { SYS_DESC(SYS_MPAM1_EL1), undef_access }, |
3040 | { SYS_DESC(SYS_MPAM0_EL1), undef_access }, | |
9da117ee | 3041 | { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, |
c773ae2b | 3042 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, |
db7dedd0 | 3043 | |
4a999a1d MZ |
3044 | { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, |
3045 | { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, | |
3046 | { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, | |
3047 | { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, | |
3048 | { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, | |
3049 | { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, | |
3050 | { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, | |
3051 | { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, | |
3052 | { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, | |
3053 | { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, | |
3054 | { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, | |
3055 | { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, | |
3056 | { SYS_DESC(SYS_ICC_DIR_EL1), undef_access }, | |
3057 | { SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, | |
e804d208 | 3058 | { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, |
03bd646d MZ |
3059 | { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi }, |
3060 | { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi }, | |
4a999a1d MZ |
3061 | { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, |
3062 | { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, | |
3063 | { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, | |
3064 | { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, | |
3065 | { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, | |
e804d208 | 3066 | { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, |
4a999a1d MZ |
3067 | { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, |
3068 | { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, | |
db7dedd0 | 3069 | |
851050a5 MR |
3070 | { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, |
3071 | { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, | |
7c8c5e6a | 3072 | |
484f8682 MZ |
3073 | { SYS_DESC(SYS_ACCDATA_EL1), undef_access }, |
3074 | ||
ed4ffaf4 MZ |
3075 | { SYS_DESC(SYS_SCXTNUM_EL1), undef_access }, |
3076 | ||
851050a5 | 3077 | { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0 }, | |
7c8c5e6a | 3078 | |
f7f2b15c | 3079 | { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr }, |
7af0c253 | 3080 | { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1, |
bb4fa769 | 3081 | .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 }, |
bf48040c | 3082 | { SYS_DESC(SYS_CCSIDR2_EL1), undef_access }, |
90807748 | 3083 | { SYS_DESC(SYS_SMIDR_EL1), undef_access }, |
3adaee78 | 3084 | IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)), |
f7f2b15c | 3085 | { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 }, |
e9b57d7f SK |
3086 | ID_FILTERED(CTR_EL0, ctr_el0, |
3087 | CTR_EL0_DIC_MASK | | |
3088 | CTR_EL0_IDC_MASK | | |
3089 | CTR_EL0_DminLine_MASK | | |
3090 | CTR_EL0_L1Ip_MASK | | |
3091 | CTR_EL0_IminLine_MASK), | |
b5568894 | 3092 | { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility }, |
7d9c1ed6 | 3093 | { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility }, |
7c8c5e6a | 3094 | |
ea9ca904 RW |
3095 | { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, |
3096 | .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr }, | |
9d2a55b4 | 3097 | { PMU_SYS_REG(PMCNTENSET_EL0), |
a45f41d7 RRA |
3098 | .access = access_pmcnten, .reg = PMCNTENSET_EL0, |
3099 | .get_user = get_pmreg, .set_user = set_pmreg }, | |
9d2a55b4 | 3100 | { PMU_SYS_REG(PMCNTENCLR_EL0), |
a45f41d7 RRA |
3101 | .access = access_pmcnten, .reg = PMCNTENSET_EL0, |
3102 | .get_user = get_pmreg, .set_user = set_pmreg }, | |
9d2a55b4 | 3103 | { PMU_SYS_REG(PMOVSCLR_EL0), |
a45f41d7 RRA |
3104 | .access = access_pmovs, .reg = PMOVSSET_EL0, |
3105 | .get_user = get_pmreg, .set_user = set_pmreg }, | |
7a3ba309 MZ |
3106 | /* |
3107 | * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was | |
3108 | * previously (and pointlessly) advertised... | |
3109 | */ | |
9d2a55b4 | 3110 | { PMU_SYS_REG(PMSWINC_EL0), |
5a430976 | 3111 | .get_user = get_raz_reg, .set_user = set_wi_reg, |
7a3ba309 | 3112 | .access = access_pmswinc, .reset = NULL }, |
9d2a55b4 | 3113 | { PMU_SYS_REG(PMSELR_EL0), |
0ab410a9 | 3114 | .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 }, |
9d2a55b4 | 3115 | { PMU_SYS_REG(PMCEID0_EL0), |
11663111 | 3116 | .access = access_pmceid, .reset = NULL }, |
9d2a55b4 | 3117 | { PMU_SYS_REG(PMCEID1_EL0), |
11663111 | 3118 | .access = access_pmceid, .reset = NULL }, |
9d2a55b4 | 3119 | { PMU_SYS_REG(PMCCNTR_EL0), |
9228b261 | 3120 | .access = access_pmu_evcntr, .reset = reset_unknown, |
64074ca8 AO |
3121 | .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr, |
3122 | .set_user = set_pmu_evcntr }, | |
9d2a55b4 | 3123 | { PMU_SYS_REG(PMXEVTYPER_EL0), |
11663111 | 3124 | .access = access_pmu_evtyper, .reset = NULL }, |
9d2a55b4 | 3125 | { PMU_SYS_REG(PMXEVCNTR_EL0), |
11663111 | 3126 | .access = access_pmu_evcntr, .reset = NULL }, |
174ed3e4 MR |
3127 | /* |
3128 | * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero | |
d692b8ad SZ |
3129 | * in 32bit mode. Here we choose to reset it as zero for consistency. |
3130 | */ | |
9d2a55b4 | 3131 | { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr, |
11663111 | 3132 | .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 }, |
9d2a55b4 | 3133 | { PMU_SYS_REG(PMOVSSET_EL0), |
a45f41d7 RRA |
3134 | .access = access_pmovs, .reg = PMOVSSET_EL0, |
3135 | .get_user = get_pmreg, .set_user = set_pmreg }, | |
7c8c5e6a | 3136 | |
b86c9bea JG |
3137 | { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0, |
3138 | .visibility = s1poe_visibility }, | |
851050a5 MR |
3139 | { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, |
3140 | { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, | |
90807748 | 3141 | { SYS_DESC(SYS_TPIDR2_EL0), undef_access }, |
4fcdf106 | 3142 | |
ed4ffaf4 MZ |
3143 | { SYS_DESC(SYS_SCXTNUM_EL0), undef_access }, |
3144 | ||
338b1793 MZ |
3145 | { SYS_DESC(SYS_AMCR_EL0), undef_access }, |
3146 | { SYS_DESC(SYS_AMCFGR_EL0), undef_access }, | |
3147 | { SYS_DESC(SYS_AMCGCR_EL0), undef_access }, | |
3148 | { SYS_DESC(SYS_AMUSERENR_EL0), undef_access }, | |
3149 | { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access }, | |
3150 | { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access }, | |
3151 | { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access }, | |
3152 | { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access }, | |
4fcdf106 IV |
3153 | AMU_AMEVCNTR0_EL0(0), |
3154 | AMU_AMEVCNTR0_EL0(1), | |
3155 | AMU_AMEVCNTR0_EL0(2), | |
3156 | AMU_AMEVCNTR0_EL0(3), | |
3157 | AMU_AMEVCNTR0_EL0(4), | |
3158 | AMU_AMEVCNTR0_EL0(5), | |
3159 | AMU_AMEVCNTR0_EL0(6), | |
3160 | AMU_AMEVCNTR0_EL0(7), | |
3161 | AMU_AMEVCNTR0_EL0(8), | |
3162 | AMU_AMEVCNTR0_EL0(9), | |
3163 | AMU_AMEVCNTR0_EL0(10), | |
3164 | AMU_AMEVCNTR0_EL0(11), | |
3165 | AMU_AMEVCNTR0_EL0(12), | |
3166 | AMU_AMEVCNTR0_EL0(13), | |
3167 | AMU_AMEVCNTR0_EL0(14), | |
3168 | AMU_AMEVCNTR0_EL0(15), | |
493cf9b7 VM |
3169 | AMU_AMEVTYPER0_EL0(0), |
3170 | AMU_AMEVTYPER0_EL0(1), | |
3171 | AMU_AMEVTYPER0_EL0(2), | |
3172 | AMU_AMEVTYPER0_EL0(3), | |
3173 | AMU_AMEVTYPER0_EL0(4), | |
3174 | AMU_AMEVTYPER0_EL0(5), | |
3175 | AMU_AMEVTYPER0_EL0(6), | |
3176 | AMU_AMEVTYPER0_EL0(7), | |
3177 | AMU_AMEVTYPER0_EL0(8), | |
3178 | AMU_AMEVTYPER0_EL0(9), | |
3179 | AMU_AMEVTYPER0_EL0(10), | |
3180 | AMU_AMEVTYPER0_EL0(11), | |
3181 | AMU_AMEVTYPER0_EL0(12), | |
3182 | AMU_AMEVTYPER0_EL0(13), | |
3183 | AMU_AMEVTYPER0_EL0(14), | |
3184 | AMU_AMEVTYPER0_EL0(15), | |
4fcdf106 IV |
3185 | AMU_AMEVCNTR1_EL0(0), |
3186 | AMU_AMEVCNTR1_EL0(1), | |
3187 | AMU_AMEVCNTR1_EL0(2), | |
3188 | AMU_AMEVCNTR1_EL0(3), | |
3189 | AMU_AMEVCNTR1_EL0(4), | |
3190 | AMU_AMEVCNTR1_EL0(5), | |
3191 | AMU_AMEVCNTR1_EL0(6), | |
3192 | AMU_AMEVCNTR1_EL0(7), | |
3193 | AMU_AMEVCNTR1_EL0(8), | |
3194 | AMU_AMEVCNTR1_EL0(9), | |
3195 | AMU_AMEVCNTR1_EL0(10), | |
3196 | AMU_AMEVCNTR1_EL0(11), | |
3197 | AMU_AMEVCNTR1_EL0(12), | |
3198 | AMU_AMEVCNTR1_EL0(13), | |
3199 | AMU_AMEVCNTR1_EL0(14), | |
3200 | AMU_AMEVCNTR1_EL0(15), | |
493cf9b7 VM |
3201 | AMU_AMEVTYPER1_EL0(0), |
3202 | AMU_AMEVTYPER1_EL0(1), | |
3203 | AMU_AMEVTYPER1_EL0(2), | |
3204 | AMU_AMEVTYPER1_EL0(3), | |
3205 | AMU_AMEVTYPER1_EL0(4), | |
3206 | AMU_AMEVTYPER1_EL0(5), | |
3207 | AMU_AMEVTYPER1_EL0(6), | |
3208 | AMU_AMEVTYPER1_EL0(7), | |
3209 | AMU_AMEVTYPER1_EL0(8), | |
3210 | AMU_AMEVTYPER1_EL0(9), | |
3211 | AMU_AMEVTYPER1_EL0(10), | |
3212 | AMU_AMEVTYPER1_EL0(11), | |
3213 | AMU_AMEVTYPER1_EL0(12), | |
3214 | AMU_AMEVTYPER1_EL0(13), | |
3215 | AMU_AMEVTYPER1_EL0(14), | |
3216 | AMU_AMEVTYPER1_EL0(15), | |
62a89c44 | 3217 | |
c605ee24 | 3218 | { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer }, |
b59dbb91 | 3219 | { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer }, |
c605ee24 | 3220 | { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer }, |
b59dbb91 | 3221 | { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer }, |
84135d3d AP |
3222 | { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, |
3223 | { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, | |
3224 | { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, | |
c9a3c58f | 3225 | |
b59dbb91 MZ |
3226 | { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer }, |
3227 | { SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer }, | |
3228 | { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer }, | |
3229 | ||
051ff581 SZ |
3230 | /* PMEVCNTRn_EL0 */ |
3231 | PMU_PMEVCNTR_EL0(0), | |
3232 | PMU_PMEVCNTR_EL0(1), | |
3233 | PMU_PMEVCNTR_EL0(2), | |
3234 | PMU_PMEVCNTR_EL0(3), | |
3235 | PMU_PMEVCNTR_EL0(4), | |
3236 | PMU_PMEVCNTR_EL0(5), | |
3237 | PMU_PMEVCNTR_EL0(6), | |
3238 | PMU_PMEVCNTR_EL0(7), | |
3239 | PMU_PMEVCNTR_EL0(8), | |
3240 | PMU_PMEVCNTR_EL0(9), | |
3241 | PMU_PMEVCNTR_EL0(10), | |
3242 | PMU_PMEVCNTR_EL0(11), | |
3243 | PMU_PMEVCNTR_EL0(12), | |
3244 | PMU_PMEVCNTR_EL0(13), | |
3245 | PMU_PMEVCNTR_EL0(14), | |
3246 | PMU_PMEVCNTR_EL0(15), | |
3247 | PMU_PMEVCNTR_EL0(16), | |
3248 | PMU_PMEVCNTR_EL0(17), | |
3249 | PMU_PMEVCNTR_EL0(18), | |
3250 | PMU_PMEVCNTR_EL0(19), | |
3251 | PMU_PMEVCNTR_EL0(20), | |
3252 | PMU_PMEVCNTR_EL0(21), | |
3253 | PMU_PMEVCNTR_EL0(22), | |
3254 | PMU_PMEVCNTR_EL0(23), | |
3255 | PMU_PMEVCNTR_EL0(24), | |
3256 | PMU_PMEVCNTR_EL0(25), | |
3257 | PMU_PMEVCNTR_EL0(26), | |
3258 | PMU_PMEVCNTR_EL0(27), | |
3259 | PMU_PMEVCNTR_EL0(28), | |
3260 | PMU_PMEVCNTR_EL0(29), | |
3261 | PMU_PMEVCNTR_EL0(30), | |
9feb21ac SZ |
3262 | /* PMEVTYPERn_EL0 */ |
3263 | PMU_PMEVTYPER_EL0(0), | |
3264 | PMU_PMEVTYPER_EL0(1), | |
3265 | PMU_PMEVTYPER_EL0(2), | |
3266 | PMU_PMEVTYPER_EL0(3), | |
3267 | PMU_PMEVTYPER_EL0(4), | |
3268 | PMU_PMEVTYPER_EL0(5), | |
3269 | PMU_PMEVTYPER_EL0(6), | |
3270 | PMU_PMEVTYPER_EL0(7), | |
3271 | PMU_PMEVTYPER_EL0(8), | |
3272 | PMU_PMEVTYPER_EL0(9), | |
3273 | PMU_PMEVTYPER_EL0(10), | |
3274 | PMU_PMEVTYPER_EL0(11), | |
3275 | PMU_PMEVTYPER_EL0(12), | |
3276 | PMU_PMEVTYPER_EL0(13), | |
3277 | PMU_PMEVTYPER_EL0(14), | |
3278 | PMU_PMEVTYPER_EL0(15), | |
3279 | PMU_PMEVTYPER_EL0(16), | |
3280 | PMU_PMEVTYPER_EL0(17), | |
3281 | PMU_PMEVTYPER_EL0(18), | |
3282 | PMU_PMEVTYPER_EL0(19), | |
3283 | PMU_PMEVTYPER_EL0(20), | |
3284 | PMU_PMEVTYPER_EL0(21), | |
3285 | PMU_PMEVTYPER_EL0(22), | |
3286 | PMU_PMEVTYPER_EL0(23), | |
3287 | PMU_PMEVTYPER_EL0(24), | |
3288 | PMU_PMEVTYPER_EL0(25), | |
3289 | PMU_PMEVTYPER_EL0(26), | |
3290 | PMU_PMEVTYPER_EL0(27), | |
3291 | PMU_PMEVTYPER_EL0(28), | |
3292 | PMU_PMEVTYPER_EL0(29), | |
3293 | PMU_PMEVTYPER_EL0(30), | |
174ed3e4 MR |
3294 | /* |
3295 | * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero | |
9feb21ac SZ |
3296 | * in 32bit mode. Here we choose to reset it as zero for consistency. |
3297 | */ | |
9d2a55b4 | 3298 | { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper, |
11663111 | 3299 | .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 }, |
051ff581 | 3300 | |
9b9cce60 MZ |
3301 | EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0), |
3302 | EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0), | |
6ff9dc23 JL |
3303 | EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1), |
3304 | EL2_REG(ACTLR_EL2, access_rw, reset_val, 0), | |
94f29ab2 | 3305 | EL2_REG_VNCR(HCR_EL2, reset_hcr, 0), |
c8823e51 | 3306 | EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0), |
75c76ab5 | 3307 | EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), |
9b9cce60 MZ |
3308 | EL2_REG_VNCR(HSTR_EL2, reset_val, 0), |
3309 | EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0), | |
3310 | EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0), | |
3311 | EL2_REG_VNCR(HFGITR_EL2, reset_val, 0), | |
3312 | EL2_REG_VNCR(HACR_EL2, reset_val, 0), | |
6ff9dc23 | 3313 | |
997eeeca MB |
3314 | EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0, |
3315 | sve_el2_visibility), | |
b3d29a82 | 3316 | |
9b9cce60 | 3317 | EL2_REG_VNCR(HCRX_EL2, reset_val, 0), |
03fb54d0 | 3318 | |
6ff9dc23 JL |
3319 | EL2_REG(TTBR0_EL2, access_rw, reset_val, 0), |
3320 | EL2_REG(TTBR1_EL2, access_rw, reset_val, 0), | |
3321 | EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1), | |
b4824120 | 3322 | EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1, |
0fcb4eea | 3323 | tcr2_el2_visibility), |
9b9cce60 MZ |
3324 | EL2_REG_VNCR(VTTBR_EL2, reset_val, 0), |
3325 | EL2_REG_VNCR(VTCR_EL2, reset_val, 0), | |
6fb75733 MZ |
3326 | EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0, |
3327 | vncr_el2_visibility), | |
6ff9dc23 | 3328 | |
cd08d321 | 3329 | { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 }, |
9b9cce60 MZ |
3330 | EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0), |
3331 | EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0), | |
d016264d | 3332 | EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0), |
9b9cce60 MZ |
3333 | EL2_REG_REDIR(SPSR_EL2, reset_val, 0), |
3334 | EL2_REG_REDIR(ELR_EL2, reset_val, 0), | |
6ff9dc23 JL |
3335 | { SYS_DESC(SYS_SP_EL1), access_sp_el1 }, | |
3336 | ||
3f7915cc | 3337 | /* AArch32 SPSR_* are RES0 if trapped from an NV guest */ | |
84ed4545 MZ |
3338 | { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi }, |
3339 | { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi }, | |
3340 | { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi }, | |
3341 | { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi }, | |
3f7915cc | 3342 | |
cd08d321 | 3343 | { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 }, |
6ff9dc23 JL |
3344 | EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), |
3345 | EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), | |
9b9cce60 | 3346 | EL2_REG_REDIR(ESR_EL2, reset_val, 0), |
cd08d321 | 3347 | { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 }, |
6ff9dc23 | 3348 | |
9b9cce60 | 3349 | EL2_REG_REDIR(FAR_EL2, reset_val, 0), |
6ff9dc23 JL |
3350 | EL2_REG(HPFAR_EL2, access_rw, reset_val, 0), |
3351 | ||
3352 | EL2_REG(MAIR_EL2, access_rw, reset_val, 0), | |
b4824120 | 3353 | EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0, |
a68cddbe | 3354 | s1pie_el2_visibility), |
b4824120 | 3355 | EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0, |
a68cddbe | 3356 | s1pie_el2_visibility), |
5970e990 MZ |
3357 | EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0, |
3358 | s1poe_el2_visibility), | |
6ff9dc23 | 3359 | EL2_REG(AMAIR_EL2, access_rw, reset_val, 0), |
31ff96c3 JM |
3360 | { SYS_DESC(SYS_MPAMHCR_EL2), undef_access }, |
3361 | { SYS_DESC(SYS_MPAMVPMV_EL2), undef_access }, | |
3362 | { SYS_DESC(SYS_MPAM2_EL2), undef_access }, | |
3363 | { SYS_DESC(SYS_MPAMVPM0_EL2), undef_access }, | |
3364 | { SYS_DESC(SYS_MPAMVPM1_EL2), undef_access }, | |
3365 | { SYS_DESC(SYS_MPAMVPM2_EL2), undef_access }, | |
3366 | { SYS_DESC(SYS_MPAMVPM3_EL2), undef_access }, | |
3367 | { SYS_DESC(SYS_MPAMVPM4_EL2), undef_access }, | |
3368 | { SYS_DESC(SYS_MPAMVPM5_EL2), undef_access }, | |
3369 | { SYS_DESC(SYS_MPAMVPM6_EL2), undef_access }, | |
3370 | { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access }, | |
6ff9dc23 JL |
3371 | |
3372 | EL2_REG(VBAR_EL2, access_rw, reset_val, 0), | |
3373 | EL2_REG(RVBAR_EL2, access_rw, reset_val, 0), | |
cd08d321 | 3374 | { SYS_DESC(SYS_RMR_EL2), undef_access }, |
6ff9dc23 | 3375 | |
96c2f033 MZ |
3376 | EL2_REG_VNCR(ICH_AP0R0_EL2, reset_val, 0), |
3377 | EL2_REG_VNCR(ICH_AP0R1_EL2, reset_val, 0), | |
3378 | EL2_REG_VNCR(ICH_AP0R2_EL2, reset_val, 0), | |
3379 | EL2_REG_VNCR(ICH_AP0R3_EL2, reset_val, 0), | |
3380 | EL2_REG_VNCR(ICH_AP1R0_EL2, reset_val, 0), | |
3381 | EL2_REG_VNCR(ICH_AP1R1_EL2, reset_val, 0), | |
3382 | EL2_REG_VNCR(ICH_AP1R2_EL2, reset_val, 0), | |
3383 | EL2_REG_VNCR(ICH_AP1R3_EL2, reset_val, 0), | |
3384 | ||
3385 | { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre }, | |
3386 | ||
9f5deace | 3387 | EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0), |
96c2f033 MZ |
3388 | { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr }, |
3389 | { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr }, | |
3390 | { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr }, | |
3391 | { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr }, | |
3392 | EL2_REG_VNCR(ICH_VMCR_EL2, reset_val, 0), | |
3393 | ||
3394 | EL2_REG_VNCR(ICH_LR0_EL2, reset_val, 0), | |
3395 | EL2_REG_VNCR(ICH_LR1_EL2, reset_val, 0), | |
3396 | EL2_REG_VNCR(ICH_LR2_EL2, reset_val, 0), | |
3397 | EL2_REG_VNCR(ICH_LR3_EL2, reset_val, 0), | |
3398 | EL2_REG_VNCR(ICH_LR4_EL2, reset_val, 0), | |
3399 | EL2_REG_VNCR(ICH_LR5_EL2, reset_val, 0), | |
3400 | EL2_REG_VNCR(ICH_LR6_EL2, reset_val, 0), | |
3401 | EL2_REG_VNCR(ICH_LR7_EL2, reset_val, 0), | |
3402 | EL2_REG_VNCR(ICH_LR8_EL2, reset_val, 0), | |
3403 | EL2_REG_VNCR(ICH_LR9_EL2, reset_val, 0), | |
3404 | EL2_REG_VNCR(ICH_LR10_EL2, reset_val, 0), | |
3405 | EL2_REG_VNCR(ICH_LR11_EL2, reset_val, 0), | |
3406 | EL2_REG_VNCR(ICH_LR12_EL2, reset_val, 0), | |
3407 | EL2_REG_VNCR(ICH_LR13_EL2, reset_val, 0), | |
3408 | EL2_REG_VNCR(ICH_LR14_EL2, reset_val, 0), | |
3409 | EL2_REG_VNCR(ICH_LR15_EL2, reset_val, 0), | |
6ff9dc23 JL |
3410 | |
3411 | EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0), | |
3412 | EL2_REG(TPIDR_EL2, access_rw, reset_val, 0), | |
3413 | ||
9b9cce60 | 3414 | EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0), |
6ff9dc23 | 3415 | EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0), |
b59dbb91 MZ |
3416 | { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer }, |
3417 | EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0), | |
3418 | EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0), | |
3419 | ||
0e459810 MZ |
3420 | { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer }, |
3421 | EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0), | |
3422 | EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0), | |
6ff9dc23 | 3423 | |
989fce63 | 3424 | { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 }, |
280b748e | 3425 | |
b59dbb91 MZ |
3426 | { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer }, |
3427 | { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer }, | |
3428 | { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer }, | |
3429 | ||
3430 | { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer }, | |
3431 | { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer }, | |
3432 | { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer }, | |
3433 | ||
6ff9dc23 | 3434 | EL2_REG(SP_EL2, NULL, reset_unknown, 0), |
62a89c44 MZ |
3435 | }; |
3436 | ||
8df747f4 MZ |
3437 | static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
3438 | const struct sys_reg_desc *r) | |
3439 | { | |
3440 | u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3441 | ||
3442 | __kvm_at_s1e01(vcpu, op, p->regval); | |
3443 | ||
3444 | return true; | |
3445 | } | |
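/*
 * A worked example of the sys_insn() packing used by these handlers,
 * assuming the usual arm64 sysreg encoding (Op0 at bits [20:19],
 * Op1 [18:16], CRn [15:12], CRm [11:8], Op2 [7:5]):
 *
 *   AT S1E1R = sys_insn(1, 0, 7, 8, 0)
 *            = (1 << 19) | (7 << 12) | (8 << 8)
 *            = 0x87800
 */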
3446 | ||
3447 | static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |
3448 | const struct sys_reg_desc *r) | |
3449 | { | |
3450 | u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3451 | ||
ff987ffc MZ |
3452 | /* There is no FGT associated with AT S1E2A :-( */ |
3453 | if (op == OP_AT_S1E2A && | |
3454 | !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) { | |
3455 | kvm_inject_undefined(vcpu); | |
3456 | return false; | |
3457 | } | |
3458 | ||
8df747f4 MZ |
3459 | __kvm_at_s1e2(vcpu, op, p->regval); |
3460 | ||
3461 | return true; | |
3462 | } | |
3463 | ||
3464 | static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |
3465 | const struct sys_reg_desc *r) | |
3466 | { | |
3467 | u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3468 | ||
3469 | __kvm_at_s12(vcpu, op, p->regval); | |
3470 | ||
3471 | return true; | |
3472 | } | |
3473 | ||
e6c9a301 MZ |
3474 | static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr) | |
3475 | { | |
3476 | struct kvm *kvm = vcpu->kvm; | |
3477 | u8 CRm = sys_reg_CRm(instr); | |
3478 | ||
3479 | if (sys_reg_CRn(instr) == TLBI_CRn_nXS && | |
3480 | !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) | |
3481 | return false; | |
3482 | ||
3483 | if (CRm == TLBI_CRm_nROS && | |
3484 | !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) | |
3485 | return false; | |
3486 | ||
3487 | return true; | |
3488 | } | |
3489 | ||
5cfb6cec MZ |
3490 | static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
3491 | const struct sys_reg_desc *r) | |
3492 | { | |
3493 | u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3494 | ||
cd08d321 MZ |
3495 | if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) |
3496 | return undef_access(vcpu, p, r); | |
5cfb6cec MZ |
3497 | |
3498 | write_lock(&vcpu->kvm->mmu_lock); | |
3499 | ||
3500 | /* | |
3501 | * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the | |
3502 | * corresponding VMIDs. | |
3503 | */ | |
3c164eb9 | 3504 | kvm_nested_s2_unmap(vcpu->kvm, true); |
5cfb6cec MZ |
3505 | |
3506 | write_unlock(&vcpu->kvm->mmu_lock); | |
3507 | ||
3508 | return true; | |
3509 | } | |
3510 | ||
70109bcd MZ |
3511 | static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr) | |
3512 | { | |
3513 | struct kvm *kvm = vcpu->kvm; | |
3514 | u8 CRm = sys_reg_CRm(instr); | |
3515 | u8 Op2 = sys_reg_Op2(instr); | |
3516 | ||
3517 | if (sys_reg_CRn(instr) == TLBI_CRn_nXS && | |
3518 | !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP)) | |
3519 | return false; | |
3520 | ||
3521 | if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) && | |
3522 | !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) | |
3523 | return false; | |
3524 | ||
3525 | if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) && | |
3526 | !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) | |
3527 | return false; | |
3528 | ||
3529 | if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) && | |
3530 | !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE)) | |
3531 | return false; | |
3532 | ||
3533 | return true; | |
3534 | } | |
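/*
 * Mapping the checks above back onto instruction names (a summary of
 * the encodings handled further down, nothing more): in the IS group,
 * Op2 2/6 are TLBI RIPAS2{E1,LE1}IS and need FEAT_TLBIRANGE; in the
 * other group, Op2 0/4 are TLBI IPAS2{E1,LE1}OS and need FEAT_TLBIOS,
 * while Op2 3/7 are TLBI RIPAS2{E1,LE1}OS and need FEAT_TLBIRANGE.
 */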
3535 | ||
8e236efa MZ |
3536 | /* Only defined here as this is an internal "abstraction" */ |
3537 | union tlbi_info { | |
3538 | struct { | |
3539 | u64 start; | |
3540 | u64 size; | |
3541 | } range; | |
3542 | ||
3543 | struct { | |
3544 | u64 addr; | |
3545 | } ipa; | |
3546 | ||
3547 | struct { | |
3548 | u64 addr; | |
3549 | u32 encoding; | |
3550 | } va; | |
3551 | }; | |
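/*
 * The TLBI handlers below all follow the same shape: decode the
 * trapped instruction's parameters into a tlbi_info, then hand a
 * per-MMU callback to kvm_s2_mmu_iterate_by_vmid(), which applies it
 * to each shadow stage-2 whose VMID matches the guest's VTTBR_EL2.
 * A typical invocation looks like:
 *
 *	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
 *				   &(union tlbi_info) {
 *					   .range = { .start = 0, .size = limit },
 *				   },
 *				   s2_mmu_unmap_range);
 */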
3552 | ||
e6c9a301 MZ |
3553 | static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu, |
3554 | const union tlbi_info *info) | |
3555 | { | |
79cc6cdb OU |
3556 | /* |
3557 | * The unmap operation is allowed to drop the MMU lock and block, which | |
3558 | * means that @mmu could be used for a different context than the one | |
3559 | * currently being invalidated. | |
3560 | * | |
3561 | * This behavior is still safe, as: | |
3562 | * | |
3563 | * 1) The vCPU(s) that recycled the MMU are responsible for invalidating | |
3564 | * the entire MMU before reusing it, which still honors the intent | |
3565 | * of a TLBI. | |
3566 | * | |
3567 | * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC | |
3568 | * and ERET to the guest), other vCPUs are allowed to use stale | |
3569 | * translations. | |
3570 | * | |
3571 | * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and | |
3572 | * at worst may cause more aborts for shadow stage-2 fills. | |
3573 | * | |
3574 | * Dropping the MMU lock also implies that shadow stage-2 fills could | |
3575 | * happen behind the back of the TLBI. This is still safe, though, as | |
3576 | * the L1 needs to put its stage-2 in a consistent state before doing | |
3577 | * the TLBI. | |
3578 | */ | |
3c164eb9 | 3579 | kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true); |
e6c9a301 MZ |
3580 | } |
3581 | ||
3582 | static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |
3583 | const struct sys_reg_desc *r) | |
3584 | { | |
3585 | u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3586 | u64 limit, vttbr; | |
3587 | ||
cd08d321 MZ |
3588 | if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) |
3589 | return undef_access(vcpu, p, r); | |
e6c9a301 MZ |
3590 | |
3591 | vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); | |
3592 | limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm)); | |
3593 | ||
3594 | kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), | |
3595 | &(union tlbi_info) { | |
3596 | .range = { | |
3597 | .start = 0, | |
3598 | .size = limit, | |
3599 | }, | |
3600 | }, | |
3601 | s2_mmu_unmap_range); | |
3602 | ||
3603 | return true; | |
3604 | } | |
3605 | ||
5d476ca5 MZ |
3606 | static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
3607 | const struct sys_reg_desc *r) | |
3608 | { | |
3609 | u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3610 | u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); | |
85bba004 | 3611 | u64 base, range; |
5d476ca5 | 3612 | |
cd08d321 MZ |
3613 | if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) |
3614 | return undef_access(vcpu, p, r); | |
5d476ca5 MZ |
3615 | |
3616 | /* | |
3617 | * Because the shadow S2 structure doesn't necessarily reflect that | |
3618 | * of the guest's S2 (different base granule size, for example), we | |
3619 | * decide to ignore TTL and only use the described range. | |
3620 | */ | |
85bba004 | 3621 | base = decode_range_tlbi(p->regval, &range, NULL); |
5d476ca5 MZ |
3622 | |
3623 | kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), | |
3624 | &(union tlbi_info) { | |
3625 | .range = { | |
3626 | .start = base, | |
3627 | .size = range, | |
3628 | }, | |
3629 | }, | |
3630 | s2_mmu_unmap_range); | |
3631 | ||
3632 | return true; | |
3633 | } | |
3634 | ||
70109bcd MZ |
3635 | static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu, |
3636 | const union tlbi_info *info) | |
3637 | { | |
3638 | unsigned long max_size; | |
3639 | u64 base_addr; | |
3640 | ||
3641 | /* | |
3642 | * We drop a number of things from the supplied value: | |
3643 | * | |
3644 | * - NS bit: we're non-secure only. | |
3645 | * | |
70109bcd MZ |
3646 | * - IPA[51:48]: We don't support 52bit IPA just yet... |
3647 | * | |
3648 | * And of course, shift the IPA field up so that it forms an actual | |
     | * byte address. | |
3649 | */ | |
3650 | base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12; | |
d1de1576 | 3651 | max_size = compute_tlb_inval_range(mmu, info->ipa.addr); |
70109bcd MZ |
3652 | base_addr &= ~(max_size - 1); |
3653 | ||
79cc6cdb OU |
3654 | /* |
3655 | * See comment in s2_mmu_unmap_range() for why this is allowed to | |
3656 | * reschedule. | |
3657 | */ | |
3c164eb9 | 3658 | kvm_stage2_unmap_range(mmu, base_addr, max_size, true); |
70109bcd MZ |
3659 | } |
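/*
 * A quick numeric example of the masking above: a guest TLBI IPAS2E1IS
 * with Xt = 0x8000000000123456 loses the NS bit (bit 63), keeps
 * IPA[47:12] from bits [35:0], and turns into the byte address
 * 0x123456000, which is then aligned down to the invalidation granule
 * returned by compute_tlb_inval_range().
 */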
3660 | ||
3661 | static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |
3662 | const struct sys_reg_desc *r) | |
3663 | { | |
3664 | u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3665 | u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2); | |
3666 | ||
cd08d321 MZ |
3667 | if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) |
3668 | return undef_access(vcpu, p, r); | |
70109bcd MZ |
3669 | |
3670 | kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), | |
3671 | &(union tlbi_info) { | |
3672 | .ipa = { | |
3673 | .addr = p->regval, | |
3674 | }, | |
3675 | }, | |
3676 | s2_mmu_unmap_ipa); | |
3677 | ||
3678 | return true; | |
3679 | } | |
3680 | ||
8e236efa MZ |
3681 | static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu, |
3682 | const union tlbi_info *info) | |
3683 | { | |
3684 | WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding)); | |
3685 | } | |
3686 | ||
aa98df31 MZ |
3687 | static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
3688 | const struct sys_reg_desc *r) | |
3689 | { | |
3690 | u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
3691 | ||
3692 | if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding)) | |
3693 | return undef_access(vcpu, p, r); | |
3694 | ||
3695 | kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); | |
3696 | return true; | |
3697 | } | |
3698 | ||
8e236efa MZ |
3699 | static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
3700 | const struct sys_reg_desc *r) | |
3701 | { | |
3702 | u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); | |
8e236efa MZ |
3703 | |
3704 | /* | |
3705 | * If we're here, this is because we've trapped on an EL1 TLBI | |
3706 | * instruction that affects the EL1 translation regime while | |
3707 | * we're running in a context that doesn't allow us to let the | |
3708 | * HW do its thing (aka vEL2): | |
3709 | * | |
3710 | * - HCR_EL2.E2H == 0 : a non-VHE guest | |
3711 | * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode | |
3712 | * | |
aa98df31 MZ |
3713 | * Another possibility is that we are invalidating the EL2 context |
3714 | * using EL1 instructions, but that we landed here because we need | |
3715 | * additional invalidation for structures that are not held in the | |
3716 | * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In | |
3717 | * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 } | |
3718 | * as we don't allow an NV-capable L1 in a nVHE configuration. | |
3719 | * | |
8e236efa MZ |
3720 | * We don't expect these helpers to ever be called when running |
3721 | * in a vEL1 context. | |
3722 | */ | |
3723 | ||
3724 | WARN_ON(!vcpu_is_el2(vcpu)); | |
3725 | ||
cd08d321 MZ |
3726 | if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) |
3727 | return undef_access(vcpu, p, r); | |
8e236efa | 3728 | |
aa98df31 MZ |
3729 | if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) { |
3730 | kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval); | |
3731 | return true; | |
3732 | } | |
3733 | ||
3734 | kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, | |
3735 | get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)), | |
8e236efa MZ |
3736 | &(union tlbi_info) { |
3737 | .va = { | |
3738 | .addr = p->regval, | |
3739 | .encoding = sys_encoding, | |
3740 | }, | |
3741 | }, | |
3742 | s2_mmu_tlbi_s1e1); | |
3743 | ||
3744 | return true; | |
3745 | } | |
3746 | ||
3747 | #define SYS_INSN(insn, access_fn) \ | |
3748 | { \ | |
3749 | SYS_DESC(OP_##insn), \ | |
3750 | .access = (access_fn), \ | |
3751 | } | |
3752 | ||
89bc63fa MZ |
3753 | static struct sys_reg_desc sys_insn_descs[] = { |
3754 | { SYS_DESC(SYS_DC_ISW), access_dcsw }, | |
3755 | { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, | |
3756 | { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, | |
8df747f4 MZ |
3757 | |
3758 | SYS_INSN(AT_S1E1R, handle_at_s1e01), | |
3759 | SYS_INSN(AT_S1E1W, handle_at_s1e01), | |
3760 | SYS_INSN(AT_S1E0R, handle_at_s1e01), | |
3761 | SYS_INSN(AT_S1E0W, handle_at_s1e01), | |
3762 | SYS_INSN(AT_S1E1RP, handle_at_s1e01), | |
3763 | SYS_INSN(AT_S1E1WP, handle_at_s1e01), | |
3764 | ||
89bc63fa MZ |
3765 | { SYS_DESC(SYS_DC_CSW), access_dcsw }, |
3766 | { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, | |
3767 | { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, | |
3768 | { SYS_DESC(SYS_DC_CISW), access_dcsw }, | |
3769 | { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, | |
3770 | { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, | |
8e236efa | 3771 | |
0cb8aae2 MZ |
3772 | SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1), |
3773 | SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1), | |
3774 | SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1), | |
3775 | SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1), | |
3776 | SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1), | |
3777 | SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1), | |
3778 | ||
5d476ca5 MZ |
3779 | SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1), |
3780 | SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1), | |
3781 | SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1), | |
3782 | SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1), | |
3783 | ||
8e236efa MZ |
3784 | SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1), |
3785 | SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1), | |
3786 | SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1), | |
3787 | SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1), | |
3788 | SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1), | |
3789 | SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1), | |
5d476ca5 MZ |
3790 | |
3791 | SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1), | |
3792 | SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1), | |
3793 | SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1), | |
3794 | SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1), | |
3795 | ||
3796 | SYS_INSN(TLBI_RVAE1, handle_tlbi_el1), | |
3797 | SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1), | |
3798 | SYS_INSN(TLBI_RVALE1, handle_tlbi_el1), | |
3799 | SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1), | |
3800 | ||
8e236efa MZ |
3801 | SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1), |
3802 | SYS_INSN(TLBI_VAE1, handle_tlbi_el1), | |
3803 | SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1), | |
3804 | SYS_INSN(TLBI_VAAE1, handle_tlbi_el1), | |
3805 | SYS_INSN(TLBI_VALE1, handle_tlbi_el1), | |
3806 | SYS_INSN(TLBI_VAALE1, handle_tlbi_el1), | |
e6c9a301 | 3807 | |
0feec776 MZ |
3808 | SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1), |
3809 | SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1), | |
3810 | SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1), | |
3811 | SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1), | |
3812 | SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1), | |
3813 | SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1), | |
3814 | ||
3815 | SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1), | |
3816 | SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1), | |
3817 | SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1), | |
3818 | SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1), | |
3819 | ||
3820 | SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1), | |
3821 | SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1), | |
3822 | SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1), | |
3823 | SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1), | |
3824 | SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1), | |
3825 | SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1), | |
3826 | ||
3827 | SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1), | |
3828 | SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1), | |
3829 | SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1), | |
3830 | SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1), | |
3831 | ||
3832 | SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1), | |
3833 | SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1), | |
3834 | SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1), | |
3835 | SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1), | |
3836 | ||
3837 | SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1), | |
3838 | SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1), | |
3839 | SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1), | |
3840 | SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1), | |
3841 | SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1), | |
3842 | SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1), | |
3843 | ||
8df747f4 MZ |
3844 | SYS_INSN(AT_S1E2R, handle_at_s1e2), |
3845 | SYS_INSN(AT_S1E2W, handle_at_s1e2), | |
3846 | SYS_INSN(AT_S12E1R, handle_at_s12), | |
3847 | SYS_INSN(AT_S12E1W, handle_at_s12), | |
3848 | SYS_INSN(AT_S12E0R, handle_at_s12), | |
3849 | SYS_INSN(AT_S12E0W, handle_at_s12), | |
ff987ffc | 3850 | SYS_INSN(AT_S1E2A, handle_at_s1e2), |
8df747f4 | 3851 | |
70109bcd | 3852 | SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is), |
5d476ca5 | 3853 | SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is), |
70109bcd | 3854 | SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is), |
5d476ca5 | 3855 | SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is), |
70109bcd | 3856 | |
aa98df31 MZ |
3857 | SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2), |
3858 | SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2), | |
0cb8aae2 | 3859 | SYS_INSN(TLBI_ALLE1OS, handle_alle1is), |
aa98df31 | 3860 | SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2), |
0cb8aae2 MZ |
3861 | SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is), |
3862 | ||
aa98df31 MZ |
3863 | SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2), |
3864 | SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2), | |
3865 | SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2), | |
3866 | SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2), | |
5d476ca5 | 3867 | |
5cfb6cec | 3868 | SYS_INSN(TLBI_ALLE1IS, handle_alle1is), |
aa98df31 MZ |
3869 | |
3870 | SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2), | |
3871 | ||
e6c9a301 | 3872 | SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is), |
0cb8aae2 | 3873 | SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is), |
70109bcd | 3874 | SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is), |
5d476ca5 MZ |
3875 | SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is), |
3876 | SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is), | |
0cb8aae2 | 3877 | SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is), |
70109bcd | 3878 | SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is), |
5d476ca5 MZ |
3879 | SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is), |
3880 | SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is), | |
aa98df31 MZ |
3881 | SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2), |
3882 | SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2), | |
3883 | SYS_INSN(TLBI_RVAE2, handle_tlbi_el2), | |
3884 | SYS_INSN(TLBI_RVALE2, handle_tlbi_el2), | |
3885 | SYS_INSN(TLBI_ALLE2, handle_tlbi_el2), | |
3886 | SYS_INSN(TLBI_VAE2, handle_tlbi_el2), | |
3887 | ||
5cfb6cec | 3888 | SYS_INSN(TLBI_ALLE1, handle_alle1is), |
aa98df31 MZ |
3889 | |
3890 | SYS_INSN(TLBI_VALE2, handle_tlbi_el2), | |
3891 | ||
e6c9a301 | 3892 | SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is), |
0feec776 MZ |
3893 | |
3894 | SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is), | |
3895 | SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is), | |
3896 | SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is), | |
3897 | SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is), | |
3898 | ||
aa98df31 MZ |
3899 | SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2), |
3900 | SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2), | |
0feec776 | 3901 | SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is), |
aa98df31 | 3902 | SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2), |
0feec776 MZ |
3903 | SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is), |
3904 | ||
aa98df31 MZ |
3905 | SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2), |
3906 | SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2), | |
3907 | SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2), | |
3908 | SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2), | |
0feec776 MZ |
3909 | |
3910 | SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is), | |
aa98df31 | 3911 | SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2), |
0feec776 MZ |
3912 | SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is), |
3913 | SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is), | |
3914 | SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is), | |
3915 | SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is), | |
3916 | SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is), | |
3917 | SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is), | |
3918 | SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is), | |
3919 | SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is), | |
3920 | SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is), | |
aa98df31 MZ |
3921 | SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2), |
3922 | SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2), | |
3923 | SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2), | |
3924 | SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2), | |
3925 | SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2), | |
3926 | SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2), | |
0feec776 | 3927 | SYS_INSN(TLBI_ALLE1NXS, handle_alle1is), |
aa98df31 | 3928 | SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2), |
0feec776 | 3929 | SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is), |
89bc63fa MZ |
3930 | }; |
3931 | ||
8c358b29 | 3932 | static bool trap_dbgdidr(struct kvm_vcpu *vcpu, |
3fec037d | 3933 | struct sys_reg_params *p, |
bdfb4b38 MZ |
3934 | const struct sys_reg_desc *r) |
3935 | { | |
3936 | if (p->is_write) { | |
3937 | return ignore_write(vcpu, p); | |
3938 | } else { | |
97ca3fcc | 3939 | u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1); |
c62d7a23 | 3940 | u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP); |
bdfb4b38 | 3941 | |
5a23e5c7 OU |
3942 | p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | |
3943 | (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) | | |
3944 | (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) | | |
3945 | (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) | | |
3946 | (1 << 15) | (el3 << 14) | (el3 << 12)); | |
bdfb4b38 MZ |
3947 | return true; |
3948 | } | |
3949 | } | |
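/*
 * For reference, the DBGDIDR value synthesized above lays out as:
 *
 *	[31:28] WRPs      - from ID_AA64DFR0_EL1.WRPs
 *	[27:24] BRPs      - from ID_AA64DFR0_EL1.BRPs
 *	[23:20] CTX_CMPs  - from ID_AA64DFR0_EL1.CTX_CMPs
 *	[19:16] Version   - from ID_AA64DFR0_EL1.DebugVer
 *	[15]    RES1
 *	[14]    nSUHD_imp - set when EL3 is implemented
 *	[12]    SE_imp    - set when EL3 is implemented
 */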
3950 | ||
1da42c34 MZ |
3951 | /* |
3952 | * AArch32 debug register mappings | |
84e690bf AB |
3953 | * |
3954 | * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] | |
3955 | * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] | |
3956 | * | |
1da42c34 MZ |
3957 | * None of the other registers share their location, so treat them as |
3958 | * if they were 64bit. | |
84e690bf | 3959 | */ |
3ce9f335 OU |
3960 | #define DBG_BCR_BVR_WCR_WVR(n) \ |
3961 | /* DBGBVRn */ \ | |
3962 | { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \ | |
3963 | trap_dbg_wb_reg, NULL, n }, \ | |
3964 | /* DBGBCRn */ \ | |
3965 | { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \ | |
3966 | /* DBGWVRn */ \ | |
3967 | { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \ | |
3968 | /* DBGWCRn */ \ | |
3969 | { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n } | |
3970 | ||
3971 | #define DBGBXVR(n) \ | |
3972 | { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \ | |
3973 | trap_dbg_wb_reg, NULL, n } | |
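/*
 * As an example of the mapping described above, DBGBXVR(1) expands to:
 *
 *	{ AA32(HI), Op1( 0), CRn( 1), CRm( 1), Op2( 1),
 *	  trap_dbg_wb_reg, NULL, 1 }
 *
 * i.e. AArch32 DBGBXVR1 is backed by the top 32 bits of DBGBVR1_EL1.
 */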
bdfb4b38 MZ |
3974 | |
3975 | /* | |
3976 | * Trapped cp14 registers. We generally ignore most of the external | |
3977 | * debug, on the principle that they don't really make sense to a | |
84e690bf | 3978 | * guest. Revisit this one day, would this principle change. |
bdfb4b38 | 3979 | */ |
72564016 | 3980 | static const struct sys_reg_desc cp14_regs[] = { |
8c358b29 AE |
3981 | /* DBGDIDR */ |
3982 | { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr }, | |
bdfb4b38 MZ |
3983 | /* DBGDTRRXext */ |
3984 | { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, | |
3985 | ||
3986 | DBG_BCR_BVR_WCR_WVR(0), | |
3987 | /* DBGDSCRint */ | |
3988 | { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, | |
3989 | DBG_BCR_BVR_WCR_WVR(1), | |
3990 | /* DBGDCCINT */ | |
1da42c34 | 3991 | { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 }, |
bdfb4b38 | 3992 | /* DBGDSCRext */ |
1da42c34 | 3993 | { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 }, |
bdfb4b38 MZ |
3994 | DBG_BCR_BVR_WCR_WVR(2), |
3995 | /* DBGDTR[RT]Xint */ | |
3996 | { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, | |
3997 | /* DBGDTR[RT]Xext */ | |
3998 | { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, | |
3999 | DBG_BCR_BVR_WCR_WVR(3), | |
4000 | DBG_BCR_BVR_WCR_WVR(4), | |
4001 | DBG_BCR_BVR_WCR_WVR(5), | |
4002 | /* DBGWFAR */ | |
4003 | { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, | |
4004 | /* DBGOSECCR */ | |
4005 | { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, | |
4006 | DBG_BCR_BVR_WCR_WVR(6), | |
4007 | /* DBGVCR */ | |
1da42c34 | 4008 | { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 }, |
bdfb4b38 MZ |
4009 | DBG_BCR_BVR_WCR_WVR(7), |
4010 | DBG_BCR_BVR_WCR_WVR(8), | |
4011 | DBG_BCR_BVR_WCR_WVR(9), | |
4012 | DBG_BCR_BVR_WCR_WVR(10), | |
4013 | DBG_BCR_BVR_WCR_WVR(11), | |
4014 | DBG_BCR_BVR_WCR_WVR(12), | |
4015 | DBG_BCR_BVR_WCR_WVR(13), | |
4016 | DBG_BCR_BVR_WCR_WVR(14), | |
4017 | DBG_BCR_BVR_WCR_WVR(15), | |
4018 | ||
4019 | /* DBGDRAR (32bit) */ | |
4020 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, | |
4021 | ||
4022 | DBGBXVR(0), | |
4023 | /* DBGOSLAR */ | |
f24adc65 | 4024 | { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 }, |
bdfb4b38 MZ |
4025 | DBGBXVR(1), |
4026 | /* DBGOSLSR */ | |
d42e2671 | 4027 | { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 }, |
bdfb4b38 MZ |
4028 | DBGBXVR(2), |
4029 | DBGBXVR(3), | |
4030 | /* DBGOSDLR */ | |
4031 | { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, | |
4032 | DBGBXVR(4), | |
4033 | /* DBGPRCR */ | |
4034 | { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, | |
4035 | DBGBXVR(5), | |
4036 | DBGBXVR(6), | |
4037 | DBGBXVR(7), | |
4038 | DBGBXVR(8), | |
4039 | DBGBXVR(9), | |
4040 | DBGBXVR(10), | |
4041 | DBGBXVR(11), | |
4042 | DBGBXVR(12), | |
4043 | DBGBXVR(13), | |
4044 | DBGBXVR(14), | |
4045 | DBGBXVR(15), | |
4046 | ||
4047 | /* DBGDSAR (32bit) */ | |
4048 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, | |
4049 | ||
4050 | /* DBGDEVID2 */ | |
4051 | { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, | |
4052 | /* DBGDEVID1 */ | |
4053 | { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, | |
4054 | /* DBGDEVID */ | |
4055 | { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, | |
4056 | /* DBGCLAIMSET */ | |
4057 | { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, | |
4058 | /* DBGCLAIMCLR */ | |
4059 | { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, | |
4060 | /* DBGAUTHSTATUS */ | |
4061 | { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, | |
72564016 MZ |
4062 | }; |
4063 | ||
a9866ba0 MZ |
4064 | /* Trapped cp14 64bit registers */ |
4065 | static const struct sys_reg_desc cp14_64_regs[] = { | |
bdfb4b38 MZ |
4066 | /* DBGDRAR (64bit) */ |
4067 | { Op1( 0), CRm( 1), .access = trap_raz_wi }, | |
4068 | ||
4069 | /* DBGDSAR (64bit) */ | |
4070 | { Op1( 0), CRm( 2), .access = trap_raz_wi }, | |
a9866ba0 MZ |
4071 | }; |
4072 | ||
a9e192cd AE |
4073 | #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \ |
4074 | AA32(_map), \ | |
4075 | Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \ | |
4076 | .visibility = pmu_visibility | |
4077 | ||
051ff581 SZ |
4078 | /* Macro to expand the PMEVCNTRn register */ |
4079 | #define PMU_PMEVCNTR(n) \ | |
a9e192cd AE |
4080 | { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \ |
4081 | (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \ | |
4082 | .access = access_pmu_evcntr } | |
051ff581 | 4083 | |
9feb21ac SZ |
4084 | /* Macro to expand the PMEVTYPERn register */ |
4085 | #define PMU_PMEVTYPER(n) \ | |
a9e192cd AE |
4086 | { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \ |
4087 | (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \ | |
4088 | .access = access_pmu_evtyper } | |
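/*
 * The CRm/Op2 arithmetic mirrors the architected encoding, where the
 * counter index is split across the two fields. For instance,
 * PMU_PMEVCNTR(10) yields CRm = 0b1000 | (10 >> 3) = 0b1001 and
 * Op2 = 10 & 0x7 = 2, which is the cp15 encoding of PMEVCNTR10.
 */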
4d44923b MZ |
4089 | /* |
4090 | * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, | |
4091 | * depending on the way they are accessed (as a 32bit or a 64bit | |
4092 | * register). | |
4093 | */ | |
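/*
 * TTBR0 is the canonical example: it appears below as a 32bit MRC/MCR
 * accessor (Op1 0, CRn 2, CRm 0, Op2 0) and again in cp15_64_regs as
 * a 64bit MRRC/MCRR accessor keyed on Op1 0, CRm 2, both backed by
 * TTBR0_EL1.
 */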
62a89c44 | 4094 | static const struct sys_reg_desc cp15_regs[] = { |
f7f2b15c | 4095 | { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr }, |
b1ea1d76 MZ |
4096 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 }, |
4097 | /* ACTLR */ | |
4098 | { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 }, | |
4099 | /* ACTLR2 */ | |
4100 | { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 }, | |
4101 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, | |
4102 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 }, | |
4103 | /* TTBCR */ | |
4104 | { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 }, | |
4105 | /* TTBCR2 */ | |
4106 | { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 }, | |
4107 | { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 }, | |
4a999a1d | 4108 | { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access }, |
b1ea1d76 MZ |
4109 | /* DFSR */ |
4110 | { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 }, | |
4111 | { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 }, | |
4112 | /* ADFSR */ | |
4113 | { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 }, | |
4114 | /* AIFSR */ | |
4115 | { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 }, | |
4116 | /* DFAR */ | |
4117 | { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 }, | |
4118 | /* IFAR */ | |
4119 | { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 }, | |
4d44923b | 4120 | |
62a89c44 MZ |
4121 | /* |
4122 | * DC{C,I,CI}SW operations: | |
4123 | */ | |
4124 | { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, | |
4125 | { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, | |
4126 | { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, | |
4d44923b | 4127 | |
7609c125 | 4128 | /* PMU */ |
a9e192cd AE |
4129 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr }, |
4130 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten }, | |
4131 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten }, | |
4132 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs }, | |
4133 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc }, | |
4134 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr }, | |
4135 | { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid }, | |
4136 | { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid }, | |
4137 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr }, | |
4138 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper }, | |
4139 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr }, | |
4140 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr }, | |
4141 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten }, | |
4142 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten }, | |
4143 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs }, | |
4144 | { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid }, | |
4145 | { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid }, | |
46081078 | 4146 | /* PMMIR */ |
a9e192cd | 4147 | { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi }, |
4d44923b | 4148 | |
b1ea1d76 MZ |
4149 | /* PRRR/MAIR0 */ |
4150 | { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 }, | |
4151 | /* NMRR/MAIR1 */ | |
4152 | { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 }, | |
4153 | /* AMAIR0 */ | |
4154 | { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 }, | |
4155 | /* AMAIR1 */ | |
4156 | { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 }, | |
db7dedd0 | 4157 | |
4a999a1d MZ |
4158 | { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access }, |
4159 | { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access }, | |
4160 | { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access }, | |
4161 | { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access }, | |
4162 | { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access }, | |
4163 | { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access }, | |
4164 | { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access }, | |
4165 | { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access }, | |
4166 | { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access }, | |
4167 | { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access }, | |
4168 | { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access }, | |
4169 | { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access }, | |
4170 | { CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access }, | |
4171 | { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access }, | |
4172 | { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access }, | |
4173 | { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access }, | |
4174 | { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access }, | |
4175 | { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access }, | |
4176 | { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access }, | |
4177 | { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, | |
4178 | { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access }, | |
4179 | { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access }, | |
db7dedd0 | 4180 | |
b1ea1d76 | 4181 | { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 }, |
051ff581 | 4182 | |
84135d3d AP |
4183 | /* Arch Timers */ | |
4184 | { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer }, | |
4185 | { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer }, | |
eac137b4 | 4186 | |
051ff581 SZ |
4187 | /* PMEVCNTRn */ |
4188 | PMU_PMEVCNTR(0), | |
4189 | PMU_PMEVCNTR(1), | |
4190 | PMU_PMEVCNTR(2), | |
4191 | PMU_PMEVCNTR(3), | |
4192 | PMU_PMEVCNTR(4), | |
4193 | PMU_PMEVCNTR(5), | |
4194 | PMU_PMEVCNTR(6), | |
4195 | PMU_PMEVCNTR(7), | |
4196 | PMU_PMEVCNTR(8), | |
4197 | PMU_PMEVCNTR(9), | |
4198 | PMU_PMEVCNTR(10), | |
4199 | PMU_PMEVCNTR(11), | |
4200 | PMU_PMEVCNTR(12), | |
4201 | PMU_PMEVCNTR(13), | |
4202 | PMU_PMEVCNTR(14), | |
4203 | PMU_PMEVCNTR(15), | |
4204 | PMU_PMEVCNTR(16), | |
4205 | PMU_PMEVCNTR(17), | |
4206 | PMU_PMEVCNTR(18), | |
4207 | PMU_PMEVCNTR(19), | |
4208 | PMU_PMEVCNTR(20), | |
4209 | PMU_PMEVCNTR(21), | |
4210 | PMU_PMEVCNTR(22), | |
4211 | PMU_PMEVCNTR(23), | |
4212 | PMU_PMEVCNTR(24), | |
4213 | PMU_PMEVCNTR(25), | |
4214 | PMU_PMEVCNTR(26), | |
4215 | PMU_PMEVCNTR(27), | |
4216 | PMU_PMEVCNTR(28), | |
4217 | PMU_PMEVCNTR(29), | |
4218 | PMU_PMEVCNTR(30), | |
9feb21ac SZ |
4219 | /* PMEVTYPERn */ |
4220 | PMU_PMEVTYPER(0), | |
4221 | PMU_PMEVTYPER(1), | |
4222 | PMU_PMEVTYPER(2), | |
4223 | PMU_PMEVTYPER(3), | |
4224 | PMU_PMEVTYPER(4), | |
4225 | PMU_PMEVTYPER(5), | |
4226 | PMU_PMEVTYPER(6), | |
4227 | PMU_PMEVTYPER(7), | |
4228 | PMU_PMEVTYPER(8), | |
4229 | PMU_PMEVTYPER(9), | |
4230 | PMU_PMEVTYPER(10), | |
4231 | PMU_PMEVTYPER(11), | |
4232 | PMU_PMEVTYPER(12), | |
4233 | PMU_PMEVTYPER(13), | |
4234 | PMU_PMEVTYPER(14), | |
4235 | PMU_PMEVTYPER(15), | |
4236 | PMU_PMEVTYPER(16), | |
4237 | PMU_PMEVTYPER(17), | |
4238 | PMU_PMEVTYPER(18), | |
4239 | PMU_PMEVTYPER(19), | |
4240 | PMU_PMEVTYPER(20), | |
4241 | PMU_PMEVTYPER(21), | |
4242 | PMU_PMEVTYPER(22), | |
4243 | PMU_PMEVTYPER(23), | |
4244 | PMU_PMEVTYPER(24), | |
4245 | PMU_PMEVTYPER(25), | |
4246 | PMU_PMEVTYPER(26), | |
4247 | PMU_PMEVTYPER(27), | |
4248 | PMU_PMEVTYPER(28), | |
4249 | PMU_PMEVTYPER(29), | |
4250 | PMU_PMEVTYPER(30), | |
4251 | /* PMCCFILTR */ | |
a9e192cd | 4252 | { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper }, |
f7f2b15c AB |
4253 | |
4254 | { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr }, | |
4255 | { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr }, | |
bf48040c AO |
4256 | |
4257 | /* CCSIDR2 */ | |
4258 | { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access }, | |
4259 | ||
b1ea1d76 | 4260 | { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 }, |
a9866ba0 MZ |
4261 | }; |
4262 | ||
4263 | static const struct sys_reg_desc cp15_64_regs[] = { | |
b1ea1d76 | 4264 | { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, |
a9e192cd | 4265 | { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr }, |
03bd646d | 4266 | { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */ |
c605ee24 | 4267 | { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer }, |
b1ea1d76 | 4268 | { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 }, |
03bd646d | 4269 | { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ |
b59dbb91 | 4270 | { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer }, |
03bd646d | 4271 | { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ |
84135d3d | 4272 | { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer }, |
a6610435 | 4273 | { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer }, |
b59dbb91 | 4274 | { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer }, |
7c8c5e6a MZ |
4275 | }; |
4276 | ||
f1f0c0cf AE |
4277 | static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n, |
4278 | bool is_32) | |
bb44a8db MZ |
4279 | { |
4280 | unsigned int i; | |
4281 | ||
4282 | for (i = 0; i < n; i++) { | |
4283 | if (!is_32 && table[i].reg && !table[i].reset) { | |
a88a9ec1 MZ |
4284 | kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n", |
4285 | &table[i], i, table[i].name); | |
f1f0c0cf | 4286 | return false; |
bb44a8db MZ |
4287 | } |
4288 | ||
4289 | if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) { | |
a88a9ec1 MZ |
4290 | kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n", |
4291 | &table[i], i, table[i - 1].name, table[i].name); | |
f1f0c0cf | 4292 | return false; |
bb44a8db MZ |
4293 | } |
4294 | } | |
4295 | ||
f1f0c0cf | 4296 | return true; |
bb44a8db MZ |
4297 | } |
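/*
 * Note (added for exposition; assumes the usual bsearch-based find_reg()):
 * the strict ordering enforced above is what lets find_reg() binary-search
 * these tables, so a misordered entry would be silently unreachable at
 * trap time if it were not rejected here at init time.
 */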
4298 | ||
74cc7e0c | 4299 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu) |
62a89c44 MZ |
4300 | { |
4301 | kvm_inject_undefined(vcpu); | |
4302 | return 1; | |
4303 | } | |
4304 | ||
e70b9522 MZ |
4305 | static void perform_access(struct kvm_vcpu *vcpu, |
4306 | struct sys_reg_params *params, | |
4307 | const struct sys_reg_desc *r) | |
4308 | { | |
599d79dc MZ |
4309 | trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); |
4310 | ||
7f34e409 | 4311 | /* Check for regs disabled by runtime config */ |
01fe5ace | 4312 | if (sysreg_hidden(vcpu, r)) { |
7f34e409 DM |
4313 | kvm_inject_undefined(vcpu); |
4314 | return; | |
4315 | } | |
4316 | ||
e70b9522 MZ |
4317 | /* |
4318 | * Not having an accessor means that we have configured a trap | |
4319 | * that we don't know how to handle. This certainly qualifies | |
4320 | * as a gross bug that should be fixed right away. | |
4321 | */ | |
4322 | BUG_ON(!r->access); | |
4323 | ||
4324 | /* Skip instruction if instructed so */ | |
4325 | if (likely(r->access(vcpu, params, r))) | |
cdb5e02e | 4326 | kvm_incr_pc(vcpu); |
e70b9522 MZ |
4327 | } |
4328 | ||
72564016 MZ |
4329 | /* |
4330 | * emulate_cp -- tries to match a sys_reg access in a handling table, and | |
4331 | * call the corresponding trap handler. | |
4332 | * | |
4333 | * @params: pointer to the descriptor of the access | |
4334 | * @table: array of trap descriptors | |
4335 | * @num: size of the trap descriptor array | |
4336 | * | |
001bb819 | 4337 | * Return true if the access has been handled, false if not. |
72564016 | 4338 | */ |
001bb819 OU |
4339 | static bool emulate_cp(struct kvm_vcpu *vcpu, |
4340 | struct sys_reg_params *params, | |
4341 | const struct sys_reg_desc *table, | |
4342 | size_t num) | |
62a89c44 | 4343 | { |
72564016 | 4344 | const struct sys_reg_desc *r; |
62a89c44 | 4345 | |
72564016 | 4346 | if (!table) |
001bb819 | 4347 | return false; /* Not handled */ |
62a89c44 | 4348 | |
62a89c44 | 4349 | r = find_reg(params, table, num); |
62a89c44 | 4350 | |
72564016 | 4351 | if (r) { |
e70b9522 | 4352 | perform_access(vcpu, params, r); |
001bb819 | 4353 | return true; |
72564016 MZ |
4354 | } |
4355 | ||
4356 | /* Not handled */ | |
001bb819 | 4357 | return false; |
72564016 MZ |
4358 | } |
4359 | ||
4360 | static void unhandled_cp_access(struct kvm_vcpu *vcpu, | |
4361 | struct sys_reg_params *params) | |
4362 | { | |
3a949f4c | 4363 | u8 esr_ec = kvm_vcpu_trap_get_class(vcpu); |
40c4f8d2 | 4364 | int cp = -1; |
72564016 | 4365 | |
3a949f4c | 4366 | switch (esr_ec) { |
c6d01a94 MR |
4367 | case ESR_ELx_EC_CP15_32: |
4368 | case ESR_ELx_EC_CP15_64: | |
72564016 MZ |
4369 | cp = 15; |
4370 | break; | |
c6d01a94 MR |
4371 | case ESR_ELx_EC_CP14_MR: |
4372 | case ESR_ELx_EC_CP14_64: | |
72564016 MZ |
4373 | cp = 14; |
4374 | break; | |
4375 | default: | |
40c4f8d2 | 4376 | WARN_ON(1); |
62a89c44 MZ |
4377 | } |
4378 | ||
bf4b96bb MR |
4379 | print_sys_reg_msg(params, |
4380 | "Unsupported guest CP%d access at: %08lx [%08lx]\n", | |
4381 | cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); | |
62a89c44 MZ |
4382 | kvm_inject_undefined(vcpu); |
4383 | } | |
4384 | ||
4385 | /** | |
7769db90 | 4386 | * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP14/CP15 access | 
62a89c44 | 4387 | * @vcpu: The VCPU pointer |
8ce78392 RD |
4388 | * @global: &struct sys_reg_desc |
4389 | * @nr_global: size of the @global array | |
62a89c44 | 4390 | */ |
72564016 MZ |
4391 | static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, |
4392 | const struct sys_reg_desc *global, | |
dcaffa7b | 4393 | size_t nr_global) |
62a89c44 MZ |
4394 | { |
4395 | struct sys_reg_params params; | |
0b12620f | 4396 | u64 esr = kvm_vcpu_get_esr(vcpu); |
c667186f | 4397 | int Rt = kvm_vcpu_sys_get_rt(vcpu); |
3a949f4c | 4398 | int Rt2 = (esr >> 10) & 0x1f; |
62a89c44 | 4399 | |
3a949f4c GS |
4400 | params.CRm = (esr >> 1) & 0xf; |
4401 | params.is_write = ((esr & 1) == 0); | |
62a89c44 MZ |
4402 | |
4403 | params.Op0 = 0; | |
3a949f4c | 4404 | params.Op1 = (esr >> 16) & 0xf; |
62a89c44 MZ |
4405 | params.Op2 = 0; |
4406 | params.CRn = 0; | |
4407 | ||
4408 | /* | |
2ec5be3d | 4409 | * Make a 64-bit value out of Rt and Rt2. As we use the same trap |
62a89c44 MZ |
4410 | * backends between AArch32 and AArch64, we get away with it. |
4411 | */ | |
4412 | if (params.is_write) { | |
2ec5be3d PF |
4413 | params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; |
4414 | params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; | |
62a89c44 MZ |
4415 | } |
4416 | ||
b6b7a806 | 4417 | /* |
dcaffa7b | 4418 | * If the table contains a handler, let it handle the access. For a | 
b6b7a806 MZ | 
4419 | * read, split the 64-bit result back into Rt and Rt2 below, then | 
4420 | * return with success. | 
4421 | */ | |
001bb819 | 4422 | if (emulate_cp(vcpu, ¶ms, global, nr_global)) { |
b6b7a806 MZ |
4423 | /* Split up the value between registers for the read side */ |
4424 | if (!params.is_write) { | |
4425 | vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); | |
4426 | vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); | |
4427 | } | |
62a89c44 | 4428 | |
b6b7a806 | 4429 | return 1; |
62a89c44 MZ |
4430 | } |
4431 | ||
b6b7a806 | 4432 | unhandled_cp_access(vcpu, ¶ms); |
62a89c44 MZ |
4433 | return 1; |
4434 | } | |
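/*
 * Worked example (illustrative, not from the original source): the
 * AArch32 "mcrr p15, 0, r2, r3, c2" (a 64-bit TTBR0 write) traps with
 * an ESR encoding Op1=0, CRm=2, Rt=2 and Rt2=3, with ESR bit 0 clear to
 * mark the access as a write; r2 then supplies bits [31:0] and r3
 * bits [63:32] of params.regval.
 */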
4435 | ||
e6519766 OU |
4436 | static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params); |
4437 | ||
9369bc5c OU |
4438 | /* |
4439 | * The CP10 ID registers are architecturally mapped to AArch64 feature | |
4440 | * registers. Abuse that fact so we can rely on the AArch64 handler for accesses | |
4441 | * from AArch32. | |
4442 | */ | |
ee87a9bd | 4443 | static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params) |
9369bc5c OU |
4444 | { |
4445 | u8 reg_id = (esr >> 10) & 0xf; | |
4446 | bool valid; | |
4447 | ||
4448 | params->is_write = ((esr & 1) == 0); | |
4449 | params->Op0 = 3; | |
4450 | params->Op1 = 0; | |
4451 | params->CRn = 0; | |
4452 | params->CRm = 3; | |
4453 | ||
4454 | /* CP10 ID registers are read-only */ | |
4455 | valid = !params->is_write; | |
4456 | ||
4457 | switch (reg_id) { | |
4458 | /* MVFR0 */ | |
4459 | case 0b0111: | |
4460 | params->Op2 = 0; | |
4461 | break; | |
4462 | /* MVFR1 */ | |
4463 | case 0b0110: | |
4464 | params->Op2 = 1; | |
4465 | break; | |
4466 | /* MVFR2 */ | |
4467 | case 0b0101: | |
4468 | params->Op2 = 2; | |
4469 | break; | |
4470 | default: | |
4471 | valid = false; | |
4472 | } | |
4473 | ||
4474 | if (valid) | |
4475 | return true; | |
4476 | ||
4477 | kvm_pr_unimpl("Unhandled cp10 register %s: %u\n", | |
4478 | params->is_write ? "write" : "read", reg_id); | |
4479 | return false; | |
4480 | } | |
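/*
 * Illustrative mapping (added for exposition): a guest "vmrs r0, mvfr1"
 * traps with reg_id=0b0110, which the switch above turns into the
 * AArch64 encoding Op0=3, Op1=0, CRn=0, CRm=3, Op2=1, i.e. MVFR1_EL1.
 */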
4481 | ||
4482 | /** | |
4483 | * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and | |
4484 | * VFP Register' from AArch32. | |
4485 | * @vcpu: The vCPU pointer | |
4486 | * | |
4487 | * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers. | |
4488 | * Work out the correct AArch64 system register encoding and reroute to the | |
4489 | * AArch64 system register emulation. | |
4490 | */ | |
4491 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu) | |
4492 | { | |
4493 | int Rt = kvm_vcpu_sys_get_rt(vcpu); | |
ee87a9bd | 4494 | u64 esr = kvm_vcpu_get_esr(vcpu); |
9369bc5c OU |
4495 | struct sys_reg_params params; |
4496 | ||
4497 | /* UNDEF on any unhandled register access */ | |
4498 | if (!kvm_esr_cp10_id_to_sys64(esr, ¶ms)) { | |
4499 | kvm_inject_undefined(vcpu); | |
4500 | return 1; | |
4501 | } | |
4502 | ||
4503 | if (emulate_sys_reg(vcpu, ¶ms)) | |
4504 | vcpu_set_reg(vcpu, Rt, params.regval); | |
4505 | ||
4506 | return 1; | |
4507 | } | |
4508 | ||
e6519766 OU |
4509 | /** |
4510 | * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where | |
4511 | * CRn=0, which corresponds to the AArch32 feature | |
4512 | * registers. | |
4513 | * @vcpu: the vCPU pointer | |
4514 | * @params: the system register access parameters. | |
4515 | * | |
4516 | * Our cp15 system register tables do not enumerate the AArch32 feature | |
4517 | * registers. Conveniently, our AArch64 table does, and the AArch32 system | |
4518 | * register encoding can be trivially remapped into the AArch64 encoding for | 
4519 | * the feature registers: append op0=3, leaving op1, CRn, CRm, and op2 the same. | 
4520 | * | |
4521 | * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit | |
4522 | * System registers with (coproc=0b1111, CRn==c0)", read accesses from this | |
4523 | * range are either UNKNOWN or RES0. Rerouting remains architectural as we | |
4524 | * treat undefined registers in this range as RAZ. | |
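 *
 * As a worked example (added for exposition): the AArch32 ID_PFR0
 * (coproc=0b1111, opc1=0, CRn=c0, CRm=c1, opc2=0) reroutes to the
 * AArch64 ID_PFR0_EL1 (Op0=3, Op1=0, CRn=0, CRm=1, Op2=0).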
4525 | */ | |
4526 | static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu, | |
4527 | struct sys_reg_params *params) | |
4528 | { | |
4529 | int Rt = kvm_vcpu_sys_get_rt(vcpu); | |
4530 | ||
4531 | /* Treat impossible writes to RO registers as UNDEFINED */ | |
4532 | if (params->is_write) { | |
4533 | unhandled_cp_access(vcpu, params); | |
4534 | return 1; | |
4535 | } | |
4536 | ||
4537 | params->Op0 = 3; | |
4538 | ||
4539 | /* | |
4540 | * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32. | |
4541 | * Avoid conflicting with future expansion of AArch64 feature registers | |
4542 | * and simply treat them as RAZ here. | |
4543 | */ | |
4544 | if (params->CRm > 3) | |
4545 | params->regval = 0; | |
4546 | else if (!emulate_sys_reg(vcpu, params)) | |
4547 | return 1; | |
4548 | ||
4549 | vcpu_set_reg(vcpu, Rt, params->regval); | |
4550 | return 1; | |
4551 | } | |
4552 | ||
62a89c44 | 4553 | /** |
7769db90 | 4554 | * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP14/CP15 access | 
62a89c44 | 4555 | * @vcpu: The VCPU pointer |
8ce78392 RD |
4556 | * @params: &struct sys_reg_params |
4557 | * @global: &struct sys_reg_desc | |
4558 | * @nr_global: size of the @global array | |
62a89c44 | 4559 | */ |
72564016 | 4560 | static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, |
e6519766 | 4561 | struct sys_reg_params *params, |
72564016 | 4562 | const struct sys_reg_desc *global, |
dcaffa7b | 4563 | size_t nr_global) |
62a89c44 | 4564 | { |
c667186f | 4565 | int Rt = kvm_vcpu_sys_get_rt(vcpu); |
62a89c44 | 4566 | |
e6519766 | 4567 | params->regval = vcpu_get_reg(vcpu, Rt); |
62a89c44 | 4568 | |
e6519766 OU |
4569 | if (emulate_cp(vcpu, params, global, nr_global)) { |
4570 | if (!params->is_write) | |
4571 | vcpu_set_reg(vcpu, Rt, params->regval); | |
72564016 | 4572 | return 1; |
2ec5be3d | 4573 | } |
72564016 | 4574 | |
e6519766 | 4575 | unhandled_cp_access(vcpu, params); |
62a89c44 MZ |
4576 | return 1; |
4577 | } | |
4578 | ||
74cc7e0c | 4579 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu) |
72564016 | 4580 | { |
dcaffa7b | 4581 | return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs)); |
72564016 MZ |
4582 | } |
4583 | ||
74cc7e0c | 4584 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu) |
72564016 | 4585 | { |
e6519766 OU |
4586 | struct sys_reg_params params; |
4587 | ||
4588 | params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu)); | |
4589 | ||
4590 | /* | |
4591 | * Certain AArch32 ID registers are handled by rerouting to the AArch64 | |
4592 | * system register table. Registers in the ID range where CRm=0 are | |
4593 | * excluded from this scheme as they do not trivially map into AArch64 | |
4cd48565 | 4594 | * system register encodings, except for AIDR/REVIDR. |
e6519766 | 4595 | */ |
4cd48565 OU |
4596 | if (params.Op1 == 0 && params.CRn == 0 && |
4597 | (params.CRm || params.Op2 == 6 /* REVIDR */)) | |
4598 | return kvm_emulate_cp15_id_reg(vcpu, ¶ms); | |
4599 | if (params.Op1 == 1 && params.CRn == 0 && | |
4600 | params.CRm == 0 && params.Op2 == 7 /* AIDR */) | |
e6519766 OU |
4601 | return kvm_emulate_cp15_id_reg(vcpu, ¶ms); |
4602 | ||
4603 | return kvm_handle_cp_32(vcpu, ¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); | |
72564016 MZ |
4604 | } |
4605 | ||
74cc7e0c | 4606 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu) |
72564016 | 4607 | { |
dcaffa7b | 4608 | return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs)); |
72564016 MZ |
4609 | } |
4610 | ||
74cc7e0c | 4611 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu) |
72564016 | 4612 | { |
e6519766 OU |
4613 | struct sys_reg_params params; |
4614 | ||
4615 | params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu)); | |
4616 | ||
4617 | return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs)); | |
72564016 MZ |
4618 | } |
4619 | ||
28eda7b5 OU |
4620 | /** |
4621 | * emulate_sys_reg - Emulate a guest access to an AArch64 system register | |
4622 | * @vcpu: The VCPU pointer | |
4623 | * @params: Decoded system register parameters | |
4624 | * | |
4625 | * Return: true if the system register access was successful, false otherwise. | |
4626 | */ | |
4627 | static bool emulate_sys_reg(struct kvm_vcpu *vcpu, | |
cc5f84fb | 4628 | struct sys_reg_params *params) |
7c8c5e6a | 4629 | { |
dcaffa7b | 4630 | const struct sys_reg_desc *r; |
7c8c5e6a | 4631 | |
dcaffa7b | 4632 | r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); |
7c8c5e6a | 4633 | if (likely(r)) { |
e70b9522 | 4634 | perform_access(vcpu, params, r); |
28eda7b5 OU |
4635 | return true; |
4636 | } | |
4637 | ||
cc5f84fb MZ |
4638 | print_sys_reg_msg(params, |
4639 | "Unsupported guest sys_reg access at: %lx [%08lx]\n", | |
4640 | *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); | |
4641 | kvm_inject_undefined(vcpu); | |
89bc63fa | 4642 | |
cc5f84fb | 4643 | return false; |
89bc63fa MZ |
4644 | } |
4645 | ||
410db103 OU |
4646 | static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos) |
4647 | { | |
4648 | unsigned long i, idreg_idx = 0; | |
4649 | ||
4650 | for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { | |
4651 | const struct sys_reg_desc *r = &sys_reg_descs[i]; | |
4652 | ||
4653 | if (!is_vm_ftr_id_reg(reg_to_encoding(r))) | |
4654 | continue; | |
4655 | ||
4656 | if (idreg_idx == pos) | |
4657 | return r; | |
4658 | ||
4659 | idreg_idx++; | |
4660 | } | |
4661 | ||
4662 | return NULL; | |
4663 | } | |
4664 | ||
89176658 MZ |
4665 | static void *idregs_debug_start(struct seq_file *s, loff_t *pos) |
4666 | { | |
4667 | struct kvm *kvm = s->private; | |
4668 | u8 *iter; | |
4669 | ||
4670 | mutex_lock(&kvm->arch.config_lock); | |
4671 | ||
4672 | iter = &kvm->arch.idreg_debugfs_iter; | |
29ef55ce OU |
4673 | if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) && |
4674 | *iter == (u8)~0) { | |
89176658 | 4675 | *iter = *pos; |
410db103 | 4676 | if (!idregs_debug_find(kvm, *iter)) |
89176658 MZ |
4677 | iter = NULL; |
4678 | } else { | |
4679 | iter = ERR_PTR(-EBUSY); | |
4680 | } | |
4681 | ||
4682 | mutex_unlock(&kvm->arch.config_lock); | |
4683 | ||
4684 | return iter; | |
4685 | } | |
4686 | ||
4687 | static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos) | |
4688 | { | |
4689 | struct kvm *kvm = s->private; | |
4690 | ||
4691 | (*pos)++; | |
4692 | ||
410db103 | 4693 | if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) { |
89176658 MZ |
4694 | kvm->arch.idreg_debugfs_iter++; |
4695 | ||
4696 | return &kvm->arch.idreg_debugfs_iter; | |
4697 | } | |
4698 | ||
4699 | return NULL; | |
4700 | } | |
4701 | ||
4702 | static void idregs_debug_stop(struct seq_file *s, void *v) | |
4703 | { | |
4704 | struct kvm *kvm = s->private; | |
4705 | ||
4706 | if (IS_ERR(v)) | |
4707 | return; | |
4708 | ||
4709 | mutex_lock(&kvm->arch.config_lock); | |
4710 | ||
4711 | kvm->arch.idreg_debugfs_iter = ~0; | |
4712 | ||
4713 | mutex_unlock(&kvm->arch.config_lock); | |
4714 | } | |
4715 | ||
4716 | static int idregs_debug_show(struct seq_file *s, void *v) | |
4717 | { | |
89176658 | 4718 | const struct sys_reg_desc *desc; |
410db103 | 4719 | struct kvm *kvm = s->private; |
89176658 | 4720 | |
410db103 | 4721 | desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter); |
89176658 MZ |
4722 | |
4723 | if (!desc->name) | |
4724 | return 0; | |
4725 | ||
4726 | seq_printf(s, "%20s:\t%016llx\n", | |
97ca3fcc | 4727 | desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc))); |
89176658 MZ |
4728 | |
4729 | return 0; | |
4730 | } | |
4731 | ||
4732 | static const struct seq_operations idregs_debug_sops = { | |
4733 | .start = idregs_debug_start, | |
4734 | .next = idregs_debug_next, | |
4735 | .stop = idregs_debug_stop, | |
4736 | .show = idregs_debug_show, | |
4737 | }; | |
4738 | ||
4739 | DEFINE_SEQ_ATTRIBUTE(idregs_debug); | |
4740 | ||
5c1ebe9a OU |
4741 | void kvm_sys_regs_create_debugfs(struct kvm *kvm) |
4742 | { | |
4743 | kvm->arch.idreg_debugfs_iter = ~0; | |
4744 | ||
4745 | debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm, | |
4746 | &idregs_debug_fops); | |
4747 | } | |
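/*
 * Illustrative usage (assumption: the standard KVM debugfs layout, where
 * each VM gets a "<pid>-<vm fd>" directory): with debugfs mounted at
 * /sys/kernel/debug, reading /sys/kernel/debug/kvm/<pid>-<vm fd>/idregs
 * prints one "name: value" line per VM-scoped feature ID register.
 */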
4748 | ||
44cbe80b | 4749 | static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg) |
47334146 | 4750 | { |
44cbe80b | 4751 | u32 id = reg_to_encoding(reg); |
47334146 JZ |
4752 | struct kvm *kvm = vcpu->kvm; |
4753 | ||
4754 | if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)) | |
4755 | return; | |
4756 | ||
d7508d27 | 4757 | kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg)); |
47334146 | 4758 | } |
47334146 | 4759 | |
e0163337 OU |
4760 | static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu, |
4761 | const struct sys_reg_desc *reg) | |
4762 | { | |
4763 | if (kvm_vcpu_initialized(vcpu)) | |
4764 | return; | |
47334146 | 4765 | |
e0163337 | 4766 | reg->reset(vcpu, reg); |
47334146 JZ |
4767 | } |
4768 | ||
750ed566 JM |
4769 | /** |
4770 | * kvm_reset_sys_regs - sets system registers to reset value | |
4771 | * @vcpu: The VCPU pointer | |
4772 | * | |
4773 | * This function finds the right table above and sets the registers on the | |
4774 | * virtual CPU struct to their architecturally defined reset values. | |
4775 | */ | |
4776 | void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) | |
7c8c5e6a | 4777 | { |
44cbe80b | 4778 | struct kvm *kvm = vcpu->kvm; |
7c8c5e6a MZ |
4779 | unsigned long i; |
4780 | ||
47334146 JZ |
4781 | for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { |
4782 | const struct sys_reg_desc *r = &sys_reg_descs[i]; | |
4783 | ||
44cbe80b | 4784 | if (!r->reset) |
47334146 JZ |
4785 | continue; |
4786 | ||
44cbe80b OU |
4787 | if (is_vm_ftr_id_reg(reg_to_encoding(r))) |
4788 | reset_vm_ftr_id_reg(vcpu, r); | |
e0163337 OU |
4789 | else if (is_vcpu_ftr_id_reg(reg_to_encoding(r))) |
4790 | reset_vcpu_ftr_id_reg(vcpu, r); | |
44cbe80b | 4791 | else |
47334146 | 4792 | r->reset(vcpu, r); |
36f998de MZ |
4793 | |
4794 | if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS) | |
8800b7c4 | 4795 | __vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0); |
47334146 | 4796 | } |
44cbe80b OU |
4797 | |
4798 | set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); | |
fe535380 AO |
4799 | |
4800 | if (kvm_vcpu_has_pmu(vcpu)) | |
4801 | kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); | |
7c8c5e6a MZ |
4802 | } |
4803 | ||
4804 | /** | |
89bc63fa MZ |
4805 | * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction |
4806 | * trap taken during guest execution | 
7c8c5e6a | 4807 | * @vcpu: The VCPU pointer |
7c8c5e6a | 4808 | */ |
74cc7e0c | 4809 | int kvm_handle_sys_reg(struct kvm_vcpu *vcpu) |
7c8c5e6a | 4810 | { |
cc5f84fb | 4811 | const struct sys_reg_desc *desc = NULL; |
7c8c5e6a | 4812 | struct sys_reg_params params; |
3a949f4c | 4813 | unsigned long esr = kvm_vcpu_get_esr(vcpu); |
c667186f | 4814 | int Rt = kvm_vcpu_sys_get_rt(vcpu); |
cc5f84fb | 4815 | int sr_idx; |
7c8c5e6a | 4816 | |
eef8c85a AB |
4817 | trace_kvm_handle_sys_reg(esr); |
4818 | ||
085eabaa | 4819 | if (triage_sysreg_trap(vcpu, &sr_idx)) |
e58ec47b MZ |
4820 | return 1; |
4821 | ||
f76f89e2 | 4822 | params = esr_sys64_to_params(esr); |
2ec5be3d | 4823 | params.regval = vcpu_get_reg(vcpu, Rt); |
7c8c5e6a | 4824 | |
89bc63fa | 4825 | /* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */ | 
cc5f84fb MZ |
4826 | if (params.Op0 == 2 || params.Op0 == 3) |
4827 | desc = &sys_reg_descs[sr_idx]; | |
4828 | else | |
4829 | desc = &sys_insn_descs[sr_idx]; | |
89bc63fa | 4830 | |
cc5f84fb | 4831 | perform_access(vcpu, ¶ms, desc); |
89bc63fa | 4832 | |
cc5f84fb MZ |
4833 | /* Read from system register? */ |
4834 | if (!params.is_write && | |
4835 | (params.Op0 == 2 || params.Op0 == 3)) | |
4836 | vcpu_set_reg(vcpu, Rt, params.regval); | |
2ec5be3d | 4837 | |
cc5f84fb | 4838 | return 1; |
7c8c5e6a MZ |
4839 | } |
4840 | ||
4841 | /****************************************************************************** | |
4842 | * Userspace API | |
4843 | *****************************************************************************/ | |
4844 | ||
4845 | static bool index_to_params(u64 id, struct sys_reg_params *params) | |
4846 | { | |
4847 | switch (id & KVM_REG_SIZE_MASK) { | |
4848 | case KVM_REG_SIZE_U64: | |
4849 | /* Any unused index bits mean it's not valid. */ | 
4850 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | |
4851 | | KVM_REG_ARM_COPROC_MASK | |
4852 | | KVM_REG_ARM64_SYSREG_OP0_MASK | |
4853 | | KVM_REG_ARM64_SYSREG_OP1_MASK | |
4854 | | KVM_REG_ARM64_SYSREG_CRN_MASK | |
4855 | | KVM_REG_ARM64_SYSREG_CRM_MASK | |
4856 | | KVM_REG_ARM64_SYSREG_OP2_MASK)) | |
4857 | return false; | |
4858 | params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) | |
4859 | >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); | |
4860 | params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) | |
4861 | >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); | |
4862 | params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) | |
4863 | >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); | |
4864 | params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) | |
4865 | >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); | |
4866 | params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) | |
4867 | >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); | |
4868 | return true; | |
4869 | default: | |
4870 | return false; | |
4871 | } | |
4872 | } | |
4873 | ||
da8d120f MZ |
4874 | const struct sys_reg_desc *get_reg_by_id(u64 id, |
4875 | const struct sys_reg_desc table[], | |
4876 | unsigned int num) | |
4b927b94 | 4877 | { |
da8d120f MZ |
4878 | struct sys_reg_params params; |
4879 | ||
4880 | if (!index_to_params(id, ¶ms)) | |
4b927b94 VK |
4881 | return NULL; |
4882 | ||
da8d120f | 4883 | return find_reg(¶ms, table, num); |
4b927b94 VK |
4884 | } |
4885 | ||
7c8c5e6a | 4886 | /* Decode an index value, and find the sys_reg_desc entry. */ |
ba23aec9 MZ |
4887 | static const struct sys_reg_desc * |
4888 | id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id, | |
4889 | const struct sys_reg_desc table[], unsigned int num) | |
4890 | ||
7c8c5e6a | 4891 | { |
dcaffa7b | 4892 | const struct sys_reg_desc *r; |
7c8c5e6a MZ |
4893 | |
4894 | /* We only do sys_reg for now. */ | |
4895 | if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) | |
4896 | return NULL; | |
4897 | ||
ba23aec9 | 4898 | r = get_reg_by_id(id, table, num); |
7c8c5e6a | 4899 | |
93390c0a | 4900 | /* Not saved in the sys_reg array and not otherwise accessible? */ |
ba23aec9 | 4901 | if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r))) |
7c8c5e6a MZ |
4902 | r = NULL; |
4903 | ||
4904 | return r; | |
4905 | } | |
4906 | ||
7af0c253 | 4907 | static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) |
7c8c5e6a MZ |
4908 | { |
4909 | u32 val; | |
4910 | u32 __user *uval = uaddr; | |
4911 | ||
4912 | /* Fail if we have unknown bits set. */ | |
4913 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | |
4914 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | |
4915 | return -ENOENT; | |
4916 | ||
4917 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | |
4918 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | |
4919 | if (KVM_REG_SIZE(id) != 4) | |
4920 | return -ENOENT; | |
4921 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | |
4922 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | |
7af0c253 | 4923 | if (val >= CSSELR_MAX) |
7c8c5e6a MZ |
4924 | return -ENOENT; |
4925 | ||
7af0c253 | 4926 | return put_user(get_ccsidr(vcpu, val), uval); |
7c8c5e6a MZ |
4927 | default: |
4928 | return -ENOENT; | |
4929 | } | |
4930 | } | |
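/*
 * Illustrative encoding (matching the masks used above): a CCSIDR demux
 * index from userspace is KVM_REG_ARM64 | KVM_REG_SIZE_U32 |
 * KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>,
 * which is exactly what write_demux_regids() advertises further down.
 */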
4931 | ||
7af0c253 | 4932 | static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) |
7c8c5e6a MZ |
4933 | { |
4934 | u32 val, newval; | |
4935 | u32 __user *uval = uaddr; | |
4936 | ||
4937 | /* Fail if we have unknown bits set. */ | |
4938 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | |
4939 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | |
4940 | return -ENOENT; | |
4941 | ||
4942 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | |
4943 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | |
4944 | if (KVM_REG_SIZE(id) != 4) | |
4945 | return -ENOENT; | |
4946 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | |
4947 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | |
7af0c253 | 4948 | if (val >= CSSELR_MAX) |
7c8c5e6a MZ |
4949 | return -ENOENT; |
4950 | ||
4951 | if (get_user(newval, uval)) | |
4952 | return -EFAULT; | |
4953 | ||
7af0c253 | 4954 | return set_ccsidr(vcpu, val, newval); |
7c8c5e6a MZ |
4955 | default: |
4956 | return -ENOENT; | |
4957 | } | |
4958 | } | |
4959 | ||
ba23aec9 MZ |
4960 | int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, |
4961 | const struct sys_reg_desc table[], unsigned int num) | |
7c8c5e6a | 4962 | { |
978ceeb3 | 4963 | u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; |
7c8c5e6a | 4964 | const struct sys_reg_desc *r; |
978ceeb3 MZ |
4965 | u64 val; |
4966 | int ret; | |
ba23aec9 MZ |
4967 | |
4968 | r = id_to_sys_reg_desc(vcpu, reg->id, table, num); | |
0746096f | 4969 | if (!r || sysreg_hidden(vcpu, r)) |
ba23aec9 MZ |
4970 | return -ENOENT; |
4971 | ||
978ceeb3 MZ |
4972 | if (r->get_user) { |
4973 | ret = (r->get_user)(vcpu, r, &val); | |
4974 | } else { | |
4975 | val = __vcpu_sys_reg(vcpu, r->reg); | |
4976 | ret = 0; | |
4977 | } | |
4978 | ||
4979 | if (!ret) | |
4980 | ret = put_user(val, uaddr); | |
ba23aec9 | 4981 | |
978ceeb3 | 4982 | return ret; |
ba23aec9 MZ |
4983 | } |
4984 | ||
4985 | int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |
4986 | { | |
7c8c5e6a MZ |
4987 | void __user *uaddr = (void __user *)(unsigned long)reg->addr; |
4988 | ||
4989 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | |
7af0c253 | 4990 | return demux_c15_get(vcpu, reg->id, uaddr); |
7c8c5e6a | 4991 | |
ba23aec9 MZ |
4992 | return kvm_sys_reg_get_user(vcpu, reg, |
4993 | sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); | |
4994 | } | |
7c8c5e6a | 4995 | |
ba23aec9 MZ |
4996 | int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, |
4997 | const struct sys_reg_desc table[], unsigned int num) | |
4998 | { | |
978ceeb3 | 4999 | u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; |
ba23aec9 | 5000 | const struct sys_reg_desc *r; |
978ceeb3 MZ |
5001 | u64 val; |
5002 | int ret; | |
5003 | ||
5004 | if (get_user(val, uaddr)) | |
5005 | return -EFAULT; | |
ba23aec9 MZ |
5006 | |
5007 | r = id_to_sys_reg_desc(vcpu, reg->id, table, num); | |
0746096f | 5008 | if (!r || sysreg_hidden(vcpu, r)) |
7f34e409 DM |
5009 | return -ENOENT; |
5010 | ||
4de06e4c OU |
5011 | if (sysreg_user_write_ignore(vcpu, r)) |
5012 | return 0; | |
5013 | ||
978ceeb3 MZ |
5014 | if (r->set_user) { |
5015 | ret = (r->set_user)(vcpu, r, val); | |
5016 | } else { | |
6678791e | 5017 | __vcpu_assign_sys_reg(vcpu, r->reg, val); |
978ceeb3 MZ |
5018 | ret = 0; |
5019 | } | |
84e690bf | 5020 | |
978ceeb3 | 5021 | return ret; |
7c8c5e6a MZ |
5022 | } |
5023 | ||
5024 | int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |
5025 | { | |
7c8c5e6a MZ |
5026 | void __user *uaddr = (void __user *)(unsigned long)reg->addr; |
5027 | ||
5028 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | |
7af0c253 | 5029 | return demux_c15_set(vcpu, reg->id, uaddr); |
7c8c5e6a | 5030 | |
ba23aec9 MZ |
5031 | return kvm_sys_reg_set_user(vcpu, reg, |
5032 | sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); | |
7c8c5e6a MZ |
5033 | } |
5034 | ||
5035 | static unsigned int num_demux_regs(void) | |
5036 | { | |
7af0c253 | 5037 | return CSSELR_MAX; |
7c8c5e6a MZ |
5038 | } |
5039 | ||
5040 | static int write_demux_regids(u64 __user *uindices) | |
5041 | { | |
efd48cea | 5042 | u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; |
7c8c5e6a MZ |
5043 | unsigned int i; |
5044 | ||
5045 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; | |
5046 | for (i = 0; i < CSSELR_MAX; i++) { | |
7c8c5e6a MZ |
5047 | if (put_user(val | i, uindices)) |
5048 | return -EFAULT; | |
5049 | uindices++; | |
5050 | } | |
5051 | return 0; | |
5052 | } | |
5053 | ||
5054 | static u64 sys_reg_to_index(const struct sys_reg_desc *reg) | |
5055 | { | |
5056 | return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | | |
5057 | KVM_REG_ARM64_SYSREG | | |
5058 | (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | | |
5059 | (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | | |
5060 | (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | | |
5061 | (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | | |
5062 | (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); | |
5063 | } | |
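/*
 * Worked example (illustrative; assumes the usual uapi shift values of
 * 14/11/7/3/0 for Op0/Op1/CRn/CRm/Op2): SCTLR_EL1, encoded as Op0=3,
 * Op1=0, CRn=1, CRm=0, Op2=0, yields an index whose low 16 bits are
 * (3 << 14) | (1 << 7) == 0xc080.
 */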
5064 | ||
5065 | static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) | |
5066 | { | |
5067 | if (!*uind) | |
5068 | return true; | |
5069 | ||
5070 | if (put_user(sys_reg_to_index(reg), *uind)) | |
5071 | return false; | |
5072 | ||
5073 | (*uind)++; | |
5074 | return true; | |
5075 | } | |
5076 | ||
7f34e409 DM |
5077 | static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, |
5078 | const struct sys_reg_desc *rd, | |
93390c0a DM |
5079 | u64 __user **uind, |
5080 | unsigned int *total) | |
5081 | { | |
5082 | /* | |
5083 | * Ignore registers we trap but don't save, | |
5084 | * and for which no custom user accessor is provided. | |
5085 | */ | |
5086 | if (!(rd->reg || rd->get_user)) | |
5087 | return 0; | |
5088 | ||
0746096f | 5089 | if (sysreg_hidden(vcpu, rd)) |
7f34e409 DM |
5090 | return 0; |
5091 | ||
93390c0a DM |
5092 | if (!copy_reg_to_user(rd, uind)) |
5093 | return -EFAULT; | |
5094 | ||
5095 | (*total)++; | |
5096 | return 0; | |
5097 | } | |
5098 | ||
7c8c5e6a MZ |
5099 | /* Assumed ordered tables, see kvm_sys_reg_table_init. */ |
5100 | static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) | |
5101 | { | |
dcaffa7b | 5102 | const struct sys_reg_desc *i2, *end2; |
7c8c5e6a | 5103 | unsigned int total = 0; |
93390c0a | 5104 | int err; |
7c8c5e6a | 5105 | |
7c8c5e6a MZ |
5106 | i2 = sys_reg_descs; |
5107 | end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); | |
5108 | ||
dcaffa7b JM |
5109 | while (i2 != end2) { |
5110 | err = walk_one_sys_reg(vcpu, i2++, &uind, &total); | |
93390c0a DM |
5111 | if (err) |
5112 | return err; | |
7c8c5e6a MZ |
5113 | } |
5114 | return total; | |
5115 | } | |
5116 | ||
5117 | unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) | |
5118 | { | |
4cd48565 | 5119 | return num_demux_regs() |
7c8c5e6a MZ |
5120 | + walk_sys_regs(vcpu, (u64 __user *)NULL); |
5121 | } | |
5122 | ||
5123 | int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | |
5124 | { | |
7c8c5e6a MZ |
5125 | int err; |
5126 | ||
7c8c5e6a MZ |
5127 | err = walk_sys_regs(vcpu, uindices); |
5128 | if (err < 0) | |
5129 | return err; | |
5130 | uindices += err; | |
5131 | ||
5132 | return write_demux_regids(uindices); | |
5133 | } | |
5134 | ||
3f9cd0ca JZ |
5135 | #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \ |
5136 | KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \ | |
5137 | sys_reg_Op1(r), \ | |
5138 | sys_reg_CRn(r), \ | |
5139 | sys_reg_CRm(r), \ | |
5140 | sys_reg_Op2(r)) | |
5141 | ||
3f9cd0ca JZ |
5142 | int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range) |
5143 | { | |
5144 | const void *zero_page = page_to_virt(ZERO_PAGE(0)); | |
5145 | u64 __user *masks = (u64 __user *)range->addr; | |
5146 | ||
5147 | /* Only feature id range is supported, reserved[13] must be zero. */ | |
5148 | if (range->range || | |
5149 | memcmp(range->reserved, zero_page, sizeof(range->reserved))) | |
5150 | return -EINVAL; | |
5151 | ||
5152 | /* Wipe the whole thing first */ | |
5153 | if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64))) | |
5154 | return -EFAULT; | |
5155 | ||
5156 | for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { | |
5157 | const struct sys_reg_desc *reg = &sys_reg_descs[i]; | |
5158 | u32 encoding = reg_to_encoding(reg); | |
5159 | u64 val; | |
5160 | ||
5161 | if (!is_feature_id_reg(encoding) || !reg->set_user) | |
5162 | continue; | |
5163 | ||
bb4fa769 SO |
5164 | if (!reg->val || |
5165 | (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) { | |
5166 | continue; | |
3f9cd0ca | 5167 | } |
bb4fa769 | 5168 | val = reg->val; |
3f9cd0ca JZ |
5169 | |
5170 | if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding)))) | |
5171 | return -EFAULT; | |
5172 | } | |
5173 | ||
5174 | return 0; | |
5175 | } | |
5176 | ||
f1ff3fc5 | 5177 | static void vcpu_set_hcr(struct kvm_vcpu *vcpu) |
c5bac1ef MZ |
5178 | { |
5179 | struct kvm *kvm = vcpu->kvm; | |
5180 | ||
f1ff3fc5 SO |
5181 | if (has_vhe() || has_hvhe()) |
5182 | vcpu->arch.hcr_el2 |= HCR_E2H; | |
5183 | if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) { | |
5184 | /* route synchronous external abort exceptions to EL2 */ | |
5185 | vcpu->arch.hcr_el2 |= HCR_TEA; | |
5186 | /* trap error record accesses */ | |
5187 | vcpu->arch.hcr_el2 |= HCR_TERR; | |
5188 | } | |
5189 | ||
5190 | if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) | |
5191 | vcpu->arch.hcr_el2 |= HCR_FWB; | |
5192 | ||
5193 | if (cpus_have_final_cap(ARM64_HAS_EVT) && | |
2843cae2 SO |
5194 | !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) && |
5195 | kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0)) | |
f1ff3fc5 SO |
5196 | vcpu->arch.hcr_el2 |= HCR_TID4; |
5197 | else | |
5198 | vcpu->arch.hcr_el2 |= HCR_TID2; | |
5199 | ||
5200 | if (vcpu_el1_is_32bit(vcpu)) | |
5201 | vcpu->arch.hcr_el2 &= ~HCR_RW; | |
5202 | ||
5203 | if (kvm_has_mte(vcpu->kvm)) | |
5204 | vcpu->arch.hcr_el2 |= HCR_ATA; | |
c5bac1ef | 5205 | |
8ecdccb9 MZ |
5206 | /* |
5207 | * In the absence of FGT, we cannot independently trap TLBI | |
5208 | * Range instructions. This isn't great, but trapping all | |
5209 | * TLBIs would be far worse. Live with it... | |
5210 | */ | |
5211 | if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS)) | |
5212 | vcpu->arch.hcr_el2 |= HCR_TTLBOS; | |
f1ff3fc5 SO |
5213 | } |
5214 | ||
5215 | void kvm_calculate_traps(struct kvm_vcpu *vcpu) | |
5216 | { | |
5217 | struct kvm *kvm = vcpu->kvm; | |
5218 | ||
5219 | mutex_lock(&kvm->arch.config_lock); | |
5220 | vcpu_set_hcr(vcpu); | |
d2137ba8 | 5221 | vcpu_set_ich_hcr(vcpu); |
44f979bf | 5222 | vcpu_set_hcrx(vcpu); |
84de212d | 5223 | |
c5bac1ef MZ |
5224 | if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags)) |
5225 | goto out; | |
5226 | ||
63d423a7 MZ |
5227 | compute_fgu(kvm, HFGRTR_GROUP); |
5228 | compute_fgu(kvm, HFGITR_GROUP); | |
5229 | compute_fgu(kvm, HDFGRTR_GROUP); | |
5230 | compute_fgu(kvm, HAFGRTR_GROUP); | |
4bc0fe08 MZ |
5231 | compute_fgu(kvm, HFGRTR2_GROUP); |
5232 | compute_fgu(kvm, HFGITR2_GROUP); | |
5233 | compute_fgu(kvm, HDFGRTR2_GROUP); | |
a7f1fa55 | 5234 | |
c5bac1ef MZ |
5235 | set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags); |
5236 | out: | |
5237 | mutex_unlock(&kvm->arch.config_lock); | |
5238 | } | |
5239 | ||
795a0bba MZ |
5240 | /* |
5241 | * Perform last adjustments to the ID registers that are implied by the | |
5242 | * configuration outside of the ID regs themselves, as well as any | |
5243 | * initialisation that directly depend on these ID registers (such as | |
5244 | * RES0/RES1 behaviours). This is not the place to configure traps though. | |
5245 | * | |
5246 | * Because this can be called once per CPU, changes must be idempotent. | |
5247 | */ | |
5248 | int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu) | |
5249 | { | |
5250 | struct kvm *kvm = vcpu->kvm; | |
5251 | ||
5252 | guard(mutex)(&kvm->arch.config_lock); | |
5253 | ||
5cb57a1a MZ |
5254 | if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) && |
5255 | irqchip_in_kernel(kvm) && | |
5256 | kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) { | |
5257 | kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK; | |
5258 | kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK; | |
5259 | } | |
5260 | ||
795a0bba | 5261 | if (vcpu_has_nv(vcpu)) { |
36f998de | 5262 | int ret = kvm_init_nv_sysregs(vcpu); |
795a0bba MZ |
5263 | if (ret) |
5264 | return ret; | |
5265 | } | |
5266 | ||
5267 | return 0; | |
5268 | } | |
5269 | ||
8d20bd63 | 5270 | int __init kvm_sys_reg_table_init(void) |
7c8c5e6a | 5271 | { |
f1f0c0cf | 5272 | bool valid = true; |
7c8c5e6a | 5273 | unsigned int i; |
19f3e7ea | 5274 | int ret = 0; |
7c8c5e6a MZ |
5275 | |
5276 | /* Make sure tables are unique and in order. */ | |
f1f0c0cf AE |
5277 | valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false); |
5278 | valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true); | |
5279 | valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true); | |
5280 | valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true); | |
5281 | valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true); | |
89bc63fa | 5282 | valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false); |
f1f0c0cf AE |
5283 | |
5284 | if (!valid) | |
5285 | return -EINVAL; | |
7c8c5e6a | 5286 | |
4cd48565 | 5287 | init_imp_id_regs(); |
7c8c5e6a | 5288 | |
19f3e7ea MZ |
5289 | ret = populate_nv_trap_config(); |
5290 | ||
938a79d0 MZ |
5291 | check_feature_map(); |
5292 | ||
19f3e7ea MZ |
5293 | for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++) |
5294 | ret = populate_sysreg_config(sys_reg_descs + i, i); | |
5295 | ||
5296 | for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++) | |
5297 | ret = populate_sysreg_config(sys_insn_descs + i, i); | |
5298 | ||
5299 | return ret; | |
7c8c5e6a | 5300 | } |