KVM: arm64: Make PIR{,E0}_EL1 UNDEF if S1PIE is not advertised to the guest
arch/arm64/kvm/sys_regs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params,
				const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}

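/*
 * Helpers describing how a virtual EL2 register is handled: either it
 * only exists at EL2 (PURE_EL2_SYSREG), or it shadows an EL1 register,
 * possibly through a translation function (MAPPED_EL2_SYSREG).
 */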
#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}

static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
	PURE_EL2_SYSREG(  VPIDR_EL2	);
	PURE_EL2_SYSREG(  VMPIDR_EL2	);
	PURE_EL2_SYSREG(  ACTLR_EL2	);
	PURE_EL2_SYSREG(  HCR_EL2	);
	PURE_EL2_SYSREG(  MDCR_EL2	);
	PURE_EL2_SYSREG(  HSTR_EL2	);
	PURE_EL2_SYSREG(  HACR_EL2	);
	PURE_EL2_SYSREG(  VTTBR_EL2	);
	PURE_EL2_SYSREG(  VTCR_EL2	);
	PURE_EL2_SYSREG(  RVBAR_EL2	);
	PURE_EL2_SYSREG(  TPIDR_EL2	);
	PURE_EL2_SYSREG(  HPFAR_EL2	);
	PURE_EL2_SYSREG(  CNTHCTL_EL2	);
	MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
			  translate_sctlr_el2_to_sctlr_el1	);
	MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
			  translate_cptr_el2_to_cpacr_el1	);
	MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
			  translate_ttbr0_el2_to_ttbr0_el1	);
	MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL	);
	MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
			  translate_tcr_el2_to_tcr_el1		);
	MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL	);
	MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL	);
	MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL	);
	MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL	);
	MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL	);
	MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL	);
	MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL	);
	MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,     NULL	);
	MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL	);
	default:
		return false;
	}
}

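/*
 * Read a guest system register, picking the live CPU copy when the
 * register is currently loaded on the CPU and the in-memory copy
 * otherwise.
 */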
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and the sysreg requires
		 * translation to be used at EL1, use the in-memory
		 * copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_sys_reg(vcpu, reg) = val;

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

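/* Generic accessor: forward the access to the backing vcpu sysreg. */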
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

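/*
 * Compute the mask/shift used to access a 64bit register through a
 * (possibly 32bit-wide) AArch32 alias, based on r->aarch32_map.
 */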
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

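/*
 * Trap handlers and userspace accessors for the hardware breakpoint and
 * watchpoint registers, all backed by vcpu->arch.vcpu_debug_state.
 */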
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}

static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}

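/* PMU registers are only exposed when the vcpu has a PMU. */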
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}

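/*
 * Return true (and inject an UNDEF) if an EL0 PMU access is not allowed
 * by PMUSERENR_EL0 and the vcpu is not running in a privileged mode.
 */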
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

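/*
 * Validate a PMU counter index against PMCR_EL0.N; out-of-range indices
 * (other than the cycle counter) result in an injected UNDEF.
 */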
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

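/*
 * Userspace accessors for the PM{CNTEN,INTEN,OVS}{SET,CLR} registers.
 * Each SET/CLR pair shares a single backing register; the encoding
 * determines whether the masked bits are set or cleared.
 */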
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	bool set;

	val &= kvm_pmu_valid_counter_mask(vcpu);

	switch (r->reg) {
	case PMOVSSET_EL0:
		/* CRm[1] being set indicates a SET register, and CLR otherwise */
		set = r->CRm & 2;
		break;
	default:
		/* Op2[0] being set indicates a SET register, and CLR otherwise */
		set = r->Op2 & 1;
		break;
	}

	if (set)
		__vcpu_sys_reg(vcpu, r->reg) |= val;
	else
		__vcpu_sys_reg(vcpu, r->reg) &= ~val;

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.pmcr_n = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);

	return false;
}

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

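/* Trap handler for the guest's physical timer and counter registers. */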
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

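/*
 * Compute the "safe" value of a feature field, using KVM's own idea of
 * which fields may be relaxed to lower values than the host's sanitised
 * view before deferring to arm64_ftr_safe_value().
 */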
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}

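/* Convert an ID_AA64DFR0_EL1.PMUVer value to its ID_DFR0_EL1.PerfMon equivalent. */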
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return IDREG(vcpu->kvm, reg_to_encoding(r));
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8.
 */
static inline bool is_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}

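/*
 * Return true if the register is one of the AArch32 ID registers, i.e.
 * (3, 0, 0, crm, op2) with 1<=crm<=3.
 */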
static inline bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}

static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}

static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);

	return true;
}

/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	return val;
}

#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)		       \
({									       \
	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		       \
	(val) &= ~reg##_##field##_MASK;					       \
	(val) |= FIELD_PREP(reg##_##field##_MASK,			       \
			    min(__f_val,				       \
				(u64)SYS_FIELD_VALUE(reg, field, limit)));     \
	(val);								       \
})

static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	return val;
}

static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
	 */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);

	return val;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

93390c0a
DM
1789/*
1790 * cpufeature ID register user accessors
1791 *
 1792 * ID register values are stored per-VM. set_id_reg() only accepts values
 1793 * permitted by the writable-field mask, and once the VM has started
 1794 * running the registers become immutable.
1795 */
93390c0a 1796static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
978ceeb3 1797 u64 *val)
93390c0a 1798{
6db7af0d
OU
1799 /*
1800 * Avoid locking if the VM has already started, as the ID registers are
1801 * guaranteed to be invariant at that point.
1802 */
1803 if (kvm_vm_has_ran_once(vcpu->kvm)) {
1804 *val = read_id_reg(vcpu, rd);
1805 return 0;
1806 }
1807
1808 mutex_lock(&vcpu->kvm->arch.config_lock);
cdd5036d 1809 *val = read_id_reg(vcpu, rd);
6db7af0d
OU
1810 mutex_unlock(&vcpu->kvm->arch.config_lock);
1811
4782ccc8 1812 return 0;
93390c0a
DM
1813}
1814
1815static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
978ceeb3 1816 u64 val)
93390c0a 1817{
2e8bf0cb
JZ
1818 u32 id = reg_to_encoding(rd);
1819 int ret;
4782ccc8 1820
2e8bf0cb
JZ
1821 mutex_lock(&vcpu->kvm->arch.config_lock);
1822
1823 /*
 1824	 * Once the VM has started, the ID registers are immutable. Reject any
1825 * write that does not match the final register value.
1826 */
1827 if (kvm_vm_has_ran_once(vcpu->kvm)) {
1828 if (val != read_id_reg(vcpu, rd))
1829 ret = -EBUSY;
1830 else
1831 ret = 0;
1832
1833 mutex_unlock(&vcpu->kvm->arch.config_lock);
1834 return ret;
1835 }
1836
1837 ret = arm64_check_features(vcpu, rd, val);
1838 if (!ret)
1839 IDREG(vcpu->kvm, id) = val;
1840
1841 mutex_unlock(&vcpu->kvm->arch.config_lock);
1842
1843 /*
1844 * arm64_check_features() returns -E2BIG to indicate the register's
1845 * feature set is a superset of the maximally-allowed register value.
1846 * While it would be nice to precisely describe this to userspace, the
1847 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
1848 * writes return -EINVAL.
1849 */
1850 if (ret == -E2BIG)
1851 ret = -EINVAL;
1852 return ret;
93390c0a
DM
1853}
1854
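/*
 * Hypothetical userspace-side sketch (illustration only, not kernel code)
 * of the contract implemented above: an ID register may be written with
 * KVM_SET_ONE_REG before the first KVM_RUN, and any later write must match
 * the now-frozen value. "vcpu_fd" and "val" are assumed to exist.
 */
#if 0
	struct kvm_one_reg reg = {
		.id	= ARM64_SYS_REG(3, 0, 0, 6, 0),	/* ID_AA64ISAR0_EL1 */
		.addr	= (__u64)&val,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
		/* -EINVAL: value not allowed; -EBUSY: VM already started */
		perror("KVM_SET_ONE_REG");
#endif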
5a430976 1855static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
978ceeb3 1856 u64 *val)
5a430976 1857{
978ceeb3
MZ
1858 *val = 0;
1859 return 0;
5a430976
AE
1860}
1861
7a3ba309 1862static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
978ceeb3 1863 u64 val)
7a3ba309 1864{
7a3ba309
MZ
1865 return 0;
1866}
1867
f7f2b15c
AB
1868static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1869 const struct sys_reg_desc *r)
1870{
1871 if (p->is_write)
1872 return write_to_read_only(vcpu, p, r);
1873
1874 p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
1875 return true;
1876}
1877
1878static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1879 const struct sys_reg_desc *r)
1880{
1881 if (p->is_write)
1882 return write_to_read_only(vcpu, p, r);
1883
7af0c253 1884 p->regval = __vcpu_sys_reg(vcpu, r->reg);
f7f2b15c
AB
1885 return true;
1886}
1887
7af0c253
AO
1888/*
1889 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 1890 * depending on the physical CPU on which the vCPU currently resides.
1891 */
d86cde6e 1892static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
7af0c253
AO
1893{
1894 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
1895 u64 clidr;
1896 u8 loc;
1897
1898 if ((ctr_el0 & CTR_EL0_IDC)) {
1899 /*
1900 * Data cache clean to the PoU is not required so LoUU and LoUIS
1901 * will not be set and a unified cache, which will be marked as
1902 * LoC, will be added.
1903 *
 1904	 * If not DIC, let the unified cache be L2 so that an instruction
 1905	 * cache can be added as L1 later.
1906 */
1907 loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
1908 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
1909 } else {
1910 /*
1911 * Data cache clean to the PoU is required so let L1 have a data
1912 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
1913 * it can be marked as LoC too.
1914 */
1915 loc = 1;
1916 clidr = 1 << CLIDR_LOUU_SHIFT;
1917 clidr |= 1 << CLIDR_LOUIS_SHIFT;
1918 clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
1919 }
1920
1921 /*
1922 * Instruction cache invalidation to the PoU is required so let L1 have
1923 * an instruction cache. If L1 already has a data cache, it will be
1924 * CACHE_TYPE_SEPARATE.
1925 */
1926 if (!(ctr_el0 & CTR_EL0_DIC))
1927 clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
1928
1929 clidr |= loc << CLIDR_LOC_SHIFT;
1930
1931 /*
 1932	 * Add a tag cache unified with the data cache. Allocation tags and data are
1933 * unified in a cache line so that it looks valid even if there is only
1934 * one cache line.
1935 */
1936 if (kvm_has_mte(vcpu->kvm))
1937 clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
1938
1939 __vcpu_sys_reg(vcpu, r->reg) = clidr;
d86cde6e
JZ
1940
1941 return __vcpu_sys_reg(vcpu, r->reg);
7af0c253
AO
1942}
1943
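/*
 * Worked examples of the topology fabricated above: with CTR_EL0.IDC=1 and
 * DIC=0 the guest sees an L1 instruction cache plus a unified L2 (LoC=2);
 * with IDC=1 and DIC=1 it sees a single unified L1 (LoC=1); with IDC=0 and
 * DIC=0 it sees a separate L1 I/D cache with LoUU = LoUIS = LoC = 1.
 */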
1944static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1945 u64 val)
1946{
1947 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
1948 u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
1949
1950 if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
1951 return -EINVAL;
1952
1953 __vcpu_sys_reg(vcpu, rd->reg) = val;
1954
1955 return 0;
1956}
1957
f7f2b15c
AB
1958static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1959 const struct sys_reg_desc *r)
1960{
7c582bf4
JM
1961 int reg = r->reg;
1962
f7f2b15c 1963 if (p->is_write)
7c582bf4 1964 vcpu_write_sys_reg(vcpu, p->regval, reg);
f7f2b15c 1965 else
7c582bf4 1966 p->regval = vcpu_read_sys_reg(vcpu, reg);
f7f2b15c
AB
1967 return true;
1968}
1969
1970static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1971 const struct sys_reg_desc *r)
1972{
1973 u32 csselr;
1974
1975 if (p->is_write)
1976 return write_to_read_only(vcpu, p, r);
1977
1978 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
7af0c253
AO
1979 csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
1980 if (csselr < CSSELR_MAX)
1981 p->regval = get_ccsidr(vcpu, csselr);
793acf87 1982
f7f2b15c
AB
1983 return true;
1984}
1985
e1f358b5
SP
1986static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
1987 const struct sys_reg_desc *rd)
1988{
673638f4
SP
1989 if (kvm_has_mte(vcpu->kvm))
1990 return 0;
1991
e1f358b5
SP
1992 return REG_HIDDEN;
1993}
1994
1995#define MTE_REG(name) { \
1996 SYS_DESC(SYS_##name), \
1997 .access = undef_access, \
1998 .reset = reset_unknown, \
1999 .reg = name, \
2000 .visibility = mte_visibility, \
2001}
2002
6ff9dc23
JL
2003static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2004 const struct sys_reg_desc *rd)
2005{
2006 if (vcpu_has_nv(vcpu))
2007 return 0;
2008
2009 return REG_HIDDEN;
2010}
2011
9b9cce60
MZ
2012static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2013 struct sys_reg_params *p,
2014 const struct sys_reg_desc *r)
2015{
2016 /*
2017 * We really shouldn't be here, and this is likely the result
2018 * of a misconfigured trap, as this register should target the
2019 * VNCR page, and nothing else.
2020 */
2021 return bad_trap(vcpu, p, r,
2022 "trap of VNCR-backed register");
2023}
2024
2025static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2026 struct sys_reg_params *p,
2027 const struct sys_reg_desc *r)
2028{
2029 /*
2030 * We really shouldn't be here, and this is likely the result
2031 * of a misconfigured trap, as this register should target the
2032 * corresponding EL1, and nothing else.
2033 */
2034 return bad_trap(vcpu, p, r,
2035 "trap of EL2 register redirected to EL1");
2036}
2037
6ff9dc23
JL
2038#define EL2_REG(name, acc, rst, v) { \
2039 SYS_DESC(SYS_##name), \
2040 .access = acc, \
2041 .reset = rst, \
2042 .reg = name, \
2043 .visibility = el2_visibility, \
2044 .val = v, \
2045}
2046
9b9cce60
MZ
2047#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
2048#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
2049
280b748e
JL
2050/*
2051 * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
 2052 * HCR_EL2.E2H==1, and are in the sysreg table only for the convenience
 2053 * of handling traps. Given that, they are always hidden from userspace.
2054 */
3f7915cc
MZ
2055static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu,
2056 const struct sys_reg_desc *rd)
280b748e
JL
2057{
2058 return REG_HIDDEN_USER;
2059}
2060
2061#define EL12_REG(name, acc, rst, v) { \
2062 SYS_DESC(SYS_##name##_EL12), \
2063 .access = acc, \
2064 .reset = rst, \
2065 .reg = name##_EL1, \
2066 .val = v, \
3f7915cc 2067 .visibility = hidden_user_visibility, \
280b748e
JL
2068}
2069
d86cde6e
JZ
2070/*
 2071 * The reset() callback and the val field are repurposed for ID registers,
 2072 * since neither is otherwise used for them.
 2073 * reset() returns the KVM-sanitised register value, which is the same as
 2074 * the host kernel's sanitised value if KVM applies no extra sanitisation.
 2075 * val is used as a mask of the fields that are writable from userspace:
 2076 * only bits set to 1 may be modified. This mask may become unnecessary
 2077 * once all ID registers are made writable from userspace.
2079 */
2080
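/*
 * Minimal sketch of the writable-mask semantic described above. This
 * hypothetical helper is not used by the tables below; it only illustrates
 * that a userspace value may differ from the KVM-sanitised value solely in
 * bits that are set in the writable mask.
 */
static inline bool idreg_change_allowed(u64 sanitised, u64 uval, u64 writable_mask)
{
	return !((sanitised ^ uval) & ~writable_mask);
}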
56d77aa8 2081#define ID_DESC(name) \
93390c0a
DM
2082 SYS_DESC(SYS_##name), \
2083 .access = access_id_reg, \
56d77aa8
OU
2084 .get_user = get_id_reg \
2085
2086/* sys_reg_desc initialiser for known cpufeature ID registers */
2087#define ID_SANITISED(name) { \
2088 ID_DESC(name), \
93390c0a 2089 .set_user = set_id_reg, \
912dee57 2090 .visibility = id_visibility, \
d86cde6e
JZ
2091 .reset = kvm_read_sanitised_id_reg, \
2092 .val = 0, \
93390c0a
DM
2093}
2094
d5efec7e
OU
2095/* sys_reg_desc initialiser for known cpufeature ID registers */
2096#define AA32_ID_SANITISED(name) { \
56d77aa8 2097 ID_DESC(name), \
d5efec7e
OU
2098 .set_user = set_id_reg, \
2099 .visibility = aa32_id_visibility, \
d86cde6e
JZ
2100 .reset = kvm_read_sanitised_id_reg, \
2101 .val = 0, \
d5efec7e
OU
2102}
2103
56d77aa8
OU
2104/* sys_reg_desc initialiser for writable ID registers */
2105#define ID_WRITABLE(name, mask) { \
2106 ID_DESC(name), \
2107 .set_user = set_id_reg, \
2108 .visibility = id_visibility, \
2109 .reset = kvm_read_sanitised_id_reg, \
2110 .val = mask, \
2111}
2112
93390c0a
DM
2113/*
2114 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2115 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2116 * (1 <= crm < 8, 0 <= Op2 < 8).
2117 */
2118#define ID_UNALLOCATED(crm, op2) { \
2119 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
34b4d203
OU
2120 .access = access_id_reg, \
2121 .get_user = get_id_reg, \
2122 .set_user = set_id_reg, \
d86cde6e
JZ
2123 .visibility = raz_visibility, \
2124 .reset = kvm_read_sanitised_id_reg, \
2125 .val = 0, \
93390c0a
DM
2126}
2127
2128/*
2129 * sys_reg_desc initialiser for known ID registers that we hide from guests.
2130 * For now, these are exposed just like unallocated ID regs: they appear
2131 * RAZ for the guest.
2132 */
2133#define ID_HIDDEN(name) { \
56d77aa8 2134 ID_DESC(name), \
34b4d203
OU
2135 .set_user = set_id_reg, \
2136 .visibility = raz_visibility, \
d86cde6e
JZ
2137 .reset = kvm_read_sanitised_id_reg, \
2138 .val = 0, \
93390c0a
DM
2139}
2140
6ff9dc23
JL
2141static bool access_sp_el1(struct kvm_vcpu *vcpu,
2142 struct sys_reg_params *p,
2143 const struct sys_reg_desc *r)
2144{
2145 if (p->is_write)
2146 __vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
2147 else
2148 p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2149
2150 return true;
2151}
2152
9da117ee
JL
2153static bool access_elr(struct kvm_vcpu *vcpu,
2154 struct sys_reg_params *p,
2155 const struct sys_reg_desc *r)
2156{
2157 if (p->is_write)
2158 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2159 else
2160 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2161
2162 return true;
2163}
2164
2165static bool access_spsr(struct kvm_vcpu *vcpu,
2166 struct sys_reg_params *p,
2167 const struct sys_reg_desc *r)
2168{
2169 if (p->is_write)
2170 __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
2171 else
2172 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2173
2174 return true;
2175}
2176
94f29ab2
MZ
2177static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2178{
2179 u64 val = r->val;
2180
2181 if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2182 val |= HCR_E2H;
2183
2184 return __vcpu_sys_reg(vcpu, r->reg) = val;
2185}
2186
7c8c5e6a
MZ
2187/*
2188 * Architected system registers.
2189 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
7609c125 2190 *
0c557ed4
MZ
 2191 * Debug handling: We do trap most, if not all, debug-related system
2192 * registers. The implementation is good enough to ensure that a guest
2193 * can use these with minimal performance degradation. The drawback is
7dabf02f
OU
2194 * that we don't implement any of the external debug architecture.
2195 * This should be revisited if we ever encounter a more demanding
2196 * guest...
7c8c5e6a
MZ
2197 */
2198static const struct sys_reg_desc sys_reg_descs[] = {
0c557ed4
MZ
2199 DBG_BCR_BVR_WCR_WVR_EL1(0),
2200 DBG_BCR_BVR_WCR_WVR_EL1(1),
ee1b64e6
MR
2201 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
2202 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
0c557ed4
MZ
2203 DBG_BCR_BVR_WCR_WVR_EL1(2),
2204 DBG_BCR_BVR_WCR_WVR_EL1(3),
2205 DBG_BCR_BVR_WCR_WVR_EL1(4),
2206 DBG_BCR_BVR_WCR_WVR_EL1(5),
2207 DBG_BCR_BVR_WCR_WVR_EL1(6),
2208 DBG_BCR_BVR_WCR_WVR_EL1(7),
2209 DBG_BCR_BVR_WCR_WVR_EL1(8),
2210 DBG_BCR_BVR_WCR_WVR_EL1(9),
2211 DBG_BCR_BVR_WCR_WVR_EL1(10),
2212 DBG_BCR_BVR_WCR_WVR_EL1(11),
2213 DBG_BCR_BVR_WCR_WVR_EL1(12),
2214 DBG_BCR_BVR_WCR_WVR_EL1(13),
2215 DBG_BCR_BVR_WCR_WVR_EL1(14),
2216 DBG_BCR_BVR_WCR_WVR_EL1(15),
2217
ee1b64e6 2218 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
f24adc65 2219 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
d42e2671 2220 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
187de7c2 2221 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
ee1b64e6
MR
2222 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
2223 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
2224 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
2225 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
2226 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
2227
2228 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
2229 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
2230 // DBGDTR[TR]X_EL0 share the same encoding
2231 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
2232
c7d11a61 2233 { SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 },
62a89c44 2234
851050a5 2235 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
93390c0a
DM
2236
2237 /*
2238 * ID regs: all ID_SANITISED() entries here must have corresponding
2239 * entries in arm64_ftr_regs[].
2240 */
2241
2242 /* AArch64 mappings of the AArch32 ID registers */
2243 /* CRm=1 */
d5efec7e
OU
2244 AA32_ID_SANITISED(ID_PFR0_EL1),
2245 AA32_ID_SANITISED(ID_PFR1_EL1),
c118cead
JZ
2246 { SYS_DESC(SYS_ID_DFR0_EL1),
2247 .access = access_id_reg,
2248 .get_user = get_id_reg,
2249 .set_user = set_id_dfr0_el1,
2250 .visibility = aa32_id_visibility,
2251 .reset = read_sanitised_id_dfr0_el1,
9f9917bc
OU
2252 .val = ID_DFR0_EL1_PerfMon_MASK |
2253 ID_DFR0_EL1_CopDbg_MASK, },
93390c0a 2254 ID_HIDDEN(ID_AFR0_EL1),
d5efec7e
OU
2255 AA32_ID_SANITISED(ID_MMFR0_EL1),
2256 AA32_ID_SANITISED(ID_MMFR1_EL1),
2257 AA32_ID_SANITISED(ID_MMFR2_EL1),
2258 AA32_ID_SANITISED(ID_MMFR3_EL1),
93390c0a
DM
2259
2260 /* CRm=2 */
d5efec7e
OU
2261 AA32_ID_SANITISED(ID_ISAR0_EL1),
2262 AA32_ID_SANITISED(ID_ISAR1_EL1),
2263 AA32_ID_SANITISED(ID_ISAR2_EL1),
2264 AA32_ID_SANITISED(ID_ISAR3_EL1),
2265 AA32_ID_SANITISED(ID_ISAR4_EL1),
2266 AA32_ID_SANITISED(ID_ISAR5_EL1),
2267 AA32_ID_SANITISED(ID_MMFR4_EL1),
2268 AA32_ID_SANITISED(ID_ISAR6_EL1),
93390c0a
DM
2269
2270 /* CRm=3 */
d5efec7e
OU
2271 AA32_ID_SANITISED(MVFR0_EL1),
2272 AA32_ID_SANITISED(MVFR1_EL1),
2273 AA32_ID_SANITISED(MVFR2_EL1),
93390c0a 2274 ID_UNALLOCATED(3,3),
d5efec7e 2275 AA32_ID_SANITISED(ID_PFR2_EL1),
dd35ec07 2276 ID_HIDDEN(ID_DFR1_EL1),
d5efec7e 2277 AA32_ID_SANITISED(ID_MMFR5_EL1),
93390c0a
DM
2278 ID_UNALLOCATED(3,7),
2279
2280 /* AArch64 ID registers */
2281 /* CRm=4 */
c39f5974
JZ
2282 { SYS_DESC(SYS_ID_AA64PFR0_EL1),
2283 .access = access_id_reg,
2284 .get_user = get_id_reg,
68667240 2285 .set_user = set_id_reg,
c39f5974 2286 .reset = read_sanitised_id_aa64pfr0_el1,
8cfd5be8
JZ
2287 .val = ~(ID_AA64PFR0_EL1_AMU |
2288 ID_AA64PFR0_EL1_MPAM |
2289 ID_AA64PFR0_EL1_SVE |
2290 ID_AA64PFR0_EL1_RAS |
2291 ID_AA64PFR0_EL1_GIC |
2292 ID_AA64PFR0_EL1_AdvSIMD |
2293 ID_AA64PFR0_EL1_FP), },
93390c0a
DM
2294 ID_SANITISED(ID_AA64PFR1_EL1),
2295 ID_UNALLOCATED(4,2),
2296 ID_UNALLOCATED(4,3),
f89fbb35 2297 ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
90807748 2298 ID_HIDDEN(ID_AA64SMFR0_EL1),
93390c0a
DM
2299 ID_UNALLOCATED(4,6),
2300 ID_UNALLOCATED(4,7),
2301
2302 /* CRm=5 */
c118cead
JZ
2303 { SYS_DESC(SYS_ID_AA64DFR0_EL1),
2304 .access = access_id_reg,
2305 .get_user = get_id_reg,
2306 .set_user = set_id_aa64dfr0_el1,
2307 .reset = read_sanitised_id_aa64dfr0_el1,
9f9917bc
OU
2308 .val = ID_AA64DFR0_EL1_PMUVer_MASK |
2309 ID_AA64DFR0_EL1_DebugVer_MASK, },
93390c0a
DM
2310 ID_SANITISED(ID_AA64DFR1_EL1),
2311 ID_UNALLOCATED(5,2),
2312 ID_UNALLOCATED(5,3),
2313 ID_HIDDEN(ID_AA64AFR0_EL1),
2314 ID_HIDDEN(ID_AA64AFR1_EL1),
2315 ID_UNALLOCATED(5,6),
2316 ID_UNALLOCATED(5,7),
2317
2318 /* CRm=6 */
56d77aa8
OU
2319 ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
2320 ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
2321 ID_AA64ISAR1_EL1_GPA |
2322 ID_AA64ISAR1_EL1_API |
2323 ID_AA64ISAR1_EL1_APA)),
2324 ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
56d77aa8
OU
2325 ID_AA64ISAR2_EL1_APA3 |
2326 ID_AA64ISAR2_EL1_GPA3)),
93390c0a
DM
2327 ID_UNALLOCATED(6,3),
2328 ID_UNALLOCATED(6,4),
2329 ID_UNALLOCATED(6,5),
2330 ID_UNALLOCATED(6,6),
2331 ID_UNALLOCATED(6,7),
2332
2333 /* CRm=7 */
d5a32b60
JZ
2334 ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
2335 ID_AA64MMFR0_EL1_TGRAN4_2 |
2336 ID_AA64MMFR0_EL1_TGRAN64_2 |
2337 ID_AA64MMFR0_EL1_TGRAN16_2)),
2338 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
2339 ID_AA64MMFR1_EL1_HCX |
2340 ID_AA64MMFR1_EL1_XNX |
2341 ID_AA64MMFR1_EL1_TWED |
2343 ID_AA64MMFR1_EL1_VH |
2344 ID_AA64MMFR1_EL1_VMIDBits)),
2345 ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
2346 ID_AA64MMFR2_EL1_EVT |
2347 ID_AA64MMFR2_EL1_FWB |
2348 ID_AA64MMFR2_EL1_IDS |
2349 ID_AA64MMFR2_EL1_NV |
2350 ID_AA64MMFR2_EL1_CCIDX)),
8ef67c67 2351 ID_SANITISED(ID_AA64MMFR3_EL1),
c21df6e4 2352 ID_SANITISED(ID_AA64MMFR4_EL1),
93390c0a
DM
2353 ID_UNALLOCATED(7,5),
2354 ID_UNALLOCATED(7,6),
2355 ID_UNALLOCATED(7,7),
2356
851050a5 2357 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
af473829 2358 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
851050a5 2359 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
2ac638fc 2360
e1f358b5
SP
2361 MTE_REG(RGSR_EL1),
2362 MTE_REG(GCR_EL1),
2ac638fc 2363
73433762 2364 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
cc427cbb 2365 { SYS_DESC(SYS_TRFCR_EL1), undef_access },
90807748
MB
2366 { SYS_DESC(SYS_SMPRI_EL1), undef_access },
2367 { SYS_DESC(SYS_SMCR_EL1), undef_access },
851050a5
MR
2368 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
2369 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
2370 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
fbff5606 2371 { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
851050a5 2372
384b40ca
MR
2373 PTRAUTH_KEY(APIA),
2374 PTRAUTH_KEY(APIB),
2375 PTRAUTH_KEY(APDA),
2376 PTRAUTH_KEY(APDB),
2377 PTRAUTH_KEY(APGA),
2378
9da117ee
JL
2379 { SYS_DESC(SYS_SPSR_EL1), access_spsr},
2380 { SYS_DESC(SYS_ELR_EL1), access_elr},
2381
851050a5
MR
2382 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
2383 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
2384 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
558daf69
DG
2385
2386 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
2387 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
2388 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
2389 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
2390 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
2391 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
2392 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
2393 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
2394
e1f358b5
SP
2395 MTE_REG(TFSR_EL1),
2396 MTE_REG(TFSRE0_EL1),
2ac638fc 2397
851050a5
MR
2398 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
2399 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
7c8c5e6a 2400
13611bc8
AE
2401 { SYS_DESC(SYS_PMSCR_EL1), undef_access },
2402 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
2403 { SYS_DESC(SYS_PMSICR_EL1), undef_access },
2404 { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
2405 { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
2406 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
2407 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
2408 { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
2409 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
2410 { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
2411 { SYS_DESC(SYS_PMBSR_EL1), undef_access },
2412 /* PMBIDR_EL1 is not trapped */
2413
9d2a55b4 2414 { PMU_SYS_REG(PMINTENSET_EL1),
a45f41d7
RRA
2415 .access = access_pminten, .reg = PMINTENSET_EL1,
2416 .get_user = get_pmreg, .set_user = set_pmreg },
9d2a55b4 2417 { PMU_SYS_REG(PMINTENCLR_EL1),
a45f41d7
RRA
2418 .access = access_pminten, .reg = PMINTENSET_EL1,
2419 .get_user = get_pmreg, .set_user = set_pmreg },
46081078 2420 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
7c8c5e6a 2421
851050a5 2422 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
839d9035
JG
2423 { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
2424 { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
851050a5 2425 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
7c8c5e6a 2426
22925521
MZ
2427 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
2428 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
2429 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
2430 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
2431 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
cc33c4e2 2432
9da117ee 2433 { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
c773ae2b 2434 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
db7dedd0 2435
7b1dba1f 2436 { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
e7f1d1ee 2437 { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
7b1dba1f 2438 { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
e7f1d1ee 2439 { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
7b1dba1f 2440 { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
e804d208 2441 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
03bd646d
MZ
2442 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
2443 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
7b1dba1f 2444 { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
e7f1d1ee 2445 { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
7b1dba1f 2446 { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
e804d208 2447 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
db7dedd0 2448
851050a5
MR
2449 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
2450 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
7c8c5e6a 2451
484f8682
MZ
2452 { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
2453
ed4ffaf4
MZ
2454 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
2455
851050a5 2456 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
7c8c5e6a 2457
f7f2b15c 2458 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
7af0c253
AO
2459 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
2460 .set_user = set_clidr },
bf48040c 2461 { SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
90807748 2462 { SYS_DESC(SYS_SMIDR_EL1), undef_access },
f7f2b15c
AB
2463 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
2464 { SYS_DESC(SYS_CTR_EL0), access_ctr },
ec0067a6 2465 { SYS_DESC(SYS_SVCR), undef_access },
7c8c5e6a 2466
ea9ca904
RW
2467 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
2468 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
9d2a55b4 2469 { PMU_SYS_REG(PMCNTENSET_EL0),
a45f41d7
RRA
2470 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2471 .get_user = get_pmreg, .set_user = set_pmreg },
9d2a55b4 2472 { PMU_SYS_REG(PMCNTENCLR_EL0),
a45f41d7
RRA
2473 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2474 .get_user = get_pmreg, .set_user = set_pmreg },
9d2a55b4 2475 { PMU_SYS_REG(PMOVSCLR_EL0),
a45f41d7
RRA
2476 .access = access_pmovs, .reg = PMOVSSET_EL0,
2477 .get_user = get_pmreg, .set_user = set_pmreg },
7a3ba309
MZ
2478 /*
2479 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
 2480	 * (pointlessly) advertised in the past...
2481 */
9d2a55b4 2482 { PMU_SYS_REG(PMSWINC_EL0),
5a430976 2483 .get_user = get_raz_reg, .set_user = set_wi_reg,
7a3ba309 2484 .access = access_pmswinc, .reset = NULL },
9d2a55b4 2485 { PMU_SYS_REG(PMSELR_EL0),
0ab410a9 2486 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
9d2a55b4 2487 { PMU_SYS_REG(PMCEID0_EL0),
11663111 2488 .access = access_pmceid, .reset = NULL },
9d2a55b4 2489 { PMU_SYS_REG(PMCEID1_EL0),
11663111 2490 .access = access_pmceid, .reset = NULL },
9d2a55b4 2491 { PMU_SYS_REG(PMCCNTR_EL0),
9228b261
RW
2492 .access = access_pmu_evcntr, .reset = reset_unknown,
2493 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
9d2a55b4 2494 { PMU_SYS_REG(PMXEVTYPER_EL0),
11663111 2495 .access = access_pmu_evtyper, .reset = NULL },
9d2a55b4 2496 { PMU_SYS_REG(PMXEVCNTR_EL0),
11663111 2497 .access = access_pmu_evcntr, .reset = NULL },
174ed3e4
MR
2498 /*
2499 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
d692b8ad
SZ
2500 * in 32bit mode. Here we choose to reset it as zero for consistency.
2501 */
9d2a55b4 2502 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
11663111 2503 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
9d2a55b4 2504 { PMU_SYS_REG(PMOVSSET_EL0),
a45f41d7
RRA
2505 .access = access_pmovs, .reg = PMOVSSET_EL0,
2506 .get_user = get_pmreg, .set_user = set_pmreg },
7c8c5e6a 2507
851050a5
MR
2508 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2509 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
90807748 2510 { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
4fcdf106 2511
ed4ffaf4
MZ
2512 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2513
338b1793
MZ
2514 { SYS_DESC(SYS_AMCR_EL0), undef_access },
2515 { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2516 { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2517 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2518 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2519 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2520 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2521 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
4fcdf106
IV
2522 AMU_AMEVCNTR0_EL0(0),
2523 AMU_AMEVCNTR0_EL0(1),
2524 AMU_AMEVCNTR0_EL0(2),
2525 AMU_AMEVCNTR0_EL0(3),
2526 AMU_AMEVCNTR0_EL0(4),
2527 AMU_AMEVCNTR0_EL0(5),
2528 AMU_AMEVCNTR0_EL0(6),
2529 AMU_AMEVCNTR0_EL0(7),
2530 AMU_AMEVCNTR0_EL0(8),
2531 AMU_AMEVCNTR0_EL0(9),
2532 AMU_AMEVCNTR0_EL0(10),
2533 AMU_AMEVCNTR0_EL0(11),
2534 AMU_AMEVCNTR0_EL0(12),
2535 AMU_AMEVCNTR0_EL0(13),
2536 AMU_AMEVCNTR0_EL0(14),
2537 AMU_AMEVCNTR0_EL0(15),
493cf9b7
VM
2538 AMU_AMEVTYPER0_EL0(0),
2539 AMU_AMEVTYPER0_EL0(1),
2540 AMU_AMEVTYPER0_EL0(2),
2541 AMU_AMEVTYPER0_EL0(3),
2542 AMU_AMEVTYPER0_EL0(4),
2543 AMU_AMEVTYPER0_EL0(5),
2544 AMU_AMEVTYPER0_EL0(6),
2545 AMU_AMEVTYPER0_EL0(7),
2546 AMU_AMEVTYPER0_EL0(8),
2547 AMU_AMEVTYPER0_EL0(9),
2548 AMU_AMEVTYPER0_EL0(10),
2549 AMU_AMEVTYPER0_EL0(11),
2550 AMU_AMEVTYPER0_EL0(12),
2551 AMU_AMEVTYPER0_EL0(13),
2552 AMU_AMEVTYPER0_EL0(14),
2553 AMU_AMEVTYPER0_EL0(15),
4fcdf106
IV
2554 AMU_AMEVCNTR1_EL0(0),
2555 AMU_AMEVCNTR1_EL0(1),
2556 AMU_AMEVCNTR1_EL0(2),
2557 AMU_AMEVCNTR1_EL0(3),
2558 AMU_AMEVCNTR1_EL0(4),
2559 AMU_AMEVCNTR1_EL0(5),
2560 AMU_AMEVCNTR1_EL0(6),
2561 AMU_AMEVCNTR1_EL0(7),
2562 AMU_AMEVCNTR1_EL0(8),
2563 AMU_AMEVCNTR1_EL0(9),
2564 AMU_AMEVCNTR1_EL0(10),
2565 AMU_AMEVCNTR1_EL0(11),
2566 AMU_AMEVCNTR1_EL0(12),
2567 AMU_AMEVCNTR1_EL0(13),
2568 AMU_AMEVCNTR1_EL0(14),
2569 AMU_AMEVCNTR1_EL0(15),
493cf9b7
VM
2570 AMU_AMEVTYPER1_EL0(0),
2571 AMU_AMEVTYPER1_EL0(1),
2572 AMU_AMEVTYPER1_EL0(2),
2573 AMU_AMEVTYPER1_EL0(3),
2574 AMU_AMEVTYPER1_EL0(4),
2575 AMU_AMEVTYPER1_EL0(5),
2576 AMU_AMEVTYPER1_EL0(6),
2577 AMU_AMEVTYPER1_EL0(7),
2578 AMU_AMEVTYPER1_EL0(8),
2579 AMU_AMEVTYPER1_EL0(9),
2580 AMU_AMEVTYPER1_EL0(10),
2581 AMU_AMEVTYPER1_EL0(11),
2582 AMU_AMEVTYPER1_EL0(12),
2583 AMU_AMEVTYPER1_EL0(13),
2584 AMU_AMEVTYPER1_EL0(14),
2585 AMU_AMEVTYPER1_EL0(15),
62a89c44 2586
c605ee24
MZ
2587 { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2588 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
84135d3d
AP
2589 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
2590 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
2591 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
c9a3c58f 2592
051ff581
SZ
2593 /* PMEVCNTRn_EL0 */
2594 PMU_PMEVCNTR_EL0(0),
2595 PMU_PMEVCNTR_EL0(1),
2596 PMU_PMEVCNTR_EL0(2),
2597 PMU_PMEVCNTR_EL0(3),
2598 PMU_PMEVCNTR_EL0(4),
2599 PMU_PMEVCNTR_EL0(5),
2600 PMU_PMEVCNTR_EL0(6),
2601 PMU_PMEVCNTR_EL0(7),
2602 PMU_PMEVCNTR_EL0(8),
2603 PMU_PMEVCNTR_EL0(9),
2604 PMU_PMEVCNTR_EL0(10),
2605 PMU_PMEVCNTR_EL0(11),
2606 PMU_PMEVCNTR_EL0(12),
2607 PMU_PMEVCNTR_EL0(13),
2608 PMU_PMEVCNTR_EL0(14),
2609 PMU_PMEVCNTR_EL0(15),
2610 PMU_PMEVCNTR_EL0(16),
2611 PMU_PMEVCNTR_EL0(17),
2612 PMU_PMEVCNTR_EL0(18),
2613 PMU_PMEVCNTR_EL0(19),
2614 PMU_PMEVCNTR_EL0(20),
2615 PMU_PMEVCNTR_EL0(21),
2616 PMU_PMEVCNTR_EL0(22),
2617 PMU_PMEVCNTR_EL0(23),
2618 PMU_PMEVCNTR_EL0(24),
2619 PMU_PMEVCNTR_EL0(25),
2620 PMU_PMEVCNTR_EL0(26),
2621 PMU_PMEVCNTR_EL0(27),
2622 PMU_PMEVCNTR_EL0(28),
2623 PMU_PMEVCNTR_EL0(29),
2624 PMU_PMEVCNTR_EL0(30),
9feb21ac
SZ
2625 /* PMEVTYPERn_EL0 */
2626 PMU_PMEVTYPER_EL0(0),
2627 PMU_PMEVTYPER_EL0(1),
2628 PMU_PMEVTYPER_EL0(2),
2629 PMU_PMEVTYPER_EL0(3),
2630 PMU_PMEVTYPER_EL0(4),
2631 PMU_PMEVTYPER_EL0(5),
2632 PMU_PMEVTYPER_EL0(6),
2633 PMU_PMEVTYPER_EL0(7),
2634 PMU_PMEVTYPER_EL0(8),
2635 PMU_PMEVTYPER_EL0(9),
2636 PMU_PMEVTYPER_EL0(10),
2637 PMU_PMEVTYPER_EL0(11),
2638 PMU_PMEVTYPER_EL0(12),
2639 PMU_PMEVTYPER_EL0(13),
2640 PMU_PMEVTYPER_EL0(14),
2641 PMU_PMEVTYPER_EL0(15),
2642 PMU_PMEVTYPER_EL0(16),
2643 PMU_PMEVTYPER_EL0(17),
2644 PMU_PMEVTYPER_EL0(18),
2645 PMU_PMEVTYPER_EL0(19),
2646 PMU_PMEVTYPER_EL0(20),
2647 PMU_PMEVTYPER_EL0(21),
2648 PMU_PMEVTYPER_EL0(22),
2649 PMU_PMEVTYPER_EL0(23),
2650 PMU_PMEVTYPER_EL0(24),
2651 PMU_PMEVTYPER_EL0(25),
2652 PMU_PMEVTYPER_EL0(26),
2653 PMU_PMEVTYPER_EL0(27),
2654 PMU_PMEVTYPER_EL0(28),
2655 PMU_PMEVTYPER_EL0(29),
2656 PMU_PMEVTYPER_EL0(30),
174ed3e4
MR
2657 /*
2658 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
9feb21ac
SZ
2659 * in 32bit mode. Here we choose to reset it as zero for consistency.
2660 */
9d2a55b4 2661 { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
11663111 2662 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
051ff581 2663
9b9cce60
MZ
2664 EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
2665 EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
6ff9dc23
JL
2666 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
2667 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
94f29ab2 2668 EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
6ff9dc23 2669 EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
75c76ab5 2670 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
9b9cce60
MZ
2671 EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
2672 EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
2673 EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
2674 EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
2675 EL2_REG_VNCR(HACR_EL2, reset_val, 0),
6ff9dc23 2676
9b9cce60 2677 EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
03fb54d0 2678
6ff9dc23
JL
2679 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
2680 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
2681 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
9b9cce60
MZ
2682 EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
2683 EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
6ff9dc23 2684
c7d11a61 2685 { SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
9b9cce60
MZ
2686 EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
2687 EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
d016264d 2688 EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
9b9cce60
MZ
2689 EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
2690 EL2_REG_REDIR(ELR_EL2, reset_val, 0),
6ff9dc23
JL
2691 { SYS_DESC(SYS_SP_EL1), access_sp_el1},
2692
3f7915cc
MZ
2693 /* AArch32 SPSR_* are RES0 if trapped from a NV guest */
2694 { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi,
2695 .visibility = hidden_user_visibility },
2696 { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi,
2697 .visibility = hidden_user_visibility },
2698 { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi,
2699 .visibility = hidden_user_visibility },
2700 { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi,
2701 .visibility = hidden_user_visibility },
2702
c7d11a61 2703 { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
6ff9dc23
JL
2704 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
2705 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
9b9cce60 2706 EL2_REG_REDIR(ESR_EL2, reset_val, 0),
c7d11a61 2707 { SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },
6ff9dc23 2708
9b9cce60 2709 EL2_REG_REDIR(FAR_EL2, reset_val, 0),
6ff9dc23
JL
2710 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
2711
2712 EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
2713 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
2714
2715 EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
2716 EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
2717 { SYS_DESC(SYS_RMR_EL2), trap_undef },
2718
2719 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
2720 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
2721
9b9cce60 2722 EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
6ff9dc23
JL
2723 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
2724
280b748e
JL
2725 EL12_REG(CNTKCTL, access_rw, reset_val, 0),
2726
6ff9dc23 2727 EL2_REG(SP_EL2, NULL, reset_unknown, 0),
62a89c44
MZ
2728};
2729
89bc63fa
MZ
2730static struct sys_reg_desc sys_insn_descs[] = {
2731 { SYS_DESC(SYS_DC_ISW), access_dcsw },
2732 { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
2733 { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
2734 { SYS_DESC(SYS_DC_CSW), access_dcsw },
2735 { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
2736 { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
2737 { SYS_DESC(SYS_DC_CISW), access_dcsw },
2738 { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
2739 { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
2740};
2741
47334146
JZ
2742static const struct sys_reg_desc *first_idreg;
2743
8c358b29 2744static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
3fec037d 2745 struct sys_reg_params *p,
bdfb4b38
MZ
2746 const struct sys_reg_desc *r)
2747{
2748 if (p->is_write) {
2749 return ignore_write(vcpu, p);
2750 } else {
8b6958d6 2751 u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
c62d7a23 2752 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
bdfb4b38 2753
5a23e5c7
OU
2754 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
2755 (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
2756 (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
2757 (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
2758 (1 << 15) | (el3 << 14) | (el3 << 12));
bdfb4b38
MZ
2759 return true;
2760 }
2761}
2762
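/*
 * The shifts above assemble the AArch32 DBGDIDR view from ID_AA64DFR0_EL1:
 * WRPs into [31:28], BRPs into [27:24], CTX_CMPs into [23:20] and DebugVer
 * into [19:16]. The remaining constants correspond to the RES1 bit and the
 * EL3-dependent implemented bits; see the Arm ARM for the authoritative
 * field definitions.
 */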
1da42c34
MZ
2763/*
2764 * AArch32 debug register mappings
84e690bf
AB
2765 *
2766 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
2767 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
2768 *
1da42c34
MZ
2769 * None of the other registers share their location, so treat them as
2770 * if they were 64bit.
84e690bf 2771 */
1da42c34
MZ
2772#define DBG_BCR_BVR_WCR_WVR(n) \
2773 /* DBGBVRn */ \
2774 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
2775 /* DBGBCRn */ \
2776 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
2777 /* DBGWVRn */ \
2778 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
2779 /* DBGWCRn */ \
84e690bf
AB
2780 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
2781
1da42c34
MZ
2782#define DBGBXVR(n) \
2783 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
bdfb4b38
MZ
2784
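/*
 * Illustrative sketch of the AA32(LO)/AA32(HI) mapping described above:
 * a 32-bit access only touches one half of the backing 64-bit EL1
 * register. This hypothetical helper is not the code used by the trap
 * path; it only shows how one 32-bit half is merged into the 64-bit value.
 */
static inline u64 aa32_insert_half(u64 reg64, u32 val32, bool hi)
{
	if (hi)
		return (reg64 & GENMASK_ULL(31, 0)) | ((u64)val32 << 32);

	return (reg64 & GENMASK_ULL(63, 32)) | val32;
}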
2785/*
2786 * Trapped cp14 registers. We generally ignore most of the external
2787 * debug, on the principle that they don't really make sense to a
84e690bf 2788 * guest. Revisit this one day, would this principle change.
bdfb4b38 2789 */
72564016 2790static const struct sys_reg_desc cp14_regs[] = {
8c358b29
AE
2791 /* DBGDIDR */
2792 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
bdfb4b38
MZ
2793 /* DBGDTRRXext */
2794 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
2795
2796 DBG_BCR_BVR_WCR_WVR(0),
2797 /* DBGDSCRint */
2798 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
2799 DBG_BCR_BVR_WCR_WVR(1),
2800 /* DBGDCCINT */
1da42c34 2801 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
bdfb4b38 2802 /* DBGDSCRext */
1da42c34 2803 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
bdfb4b38
MZ
2804 DBG_BCR_BVR_WCR_WVR(2),
2805 /* DBGDTR[RT]Xint */
2806 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
2807 /* DBGDTR[RT]Xext */
2808 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
2809 DBG_BCR_BVR_WCR_WVR(3),
2810 DBG_BCR_BVR_WCR_WVR(4),
2811 DBG_BCR_BVR_WCR_WVR(5),
2812 /* DBGWFAR */
2813 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
2814 /* DBGOSECCR */
2815 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
2816 DBG_BCR_BVR_WCR_WVR(6),
2817 /* DBGVCR */
1da42c34 2818 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
bdfb4b38
MZ
2819 DBG_BCR_BVR_WCR_WVR(7),
2820 DBG_BCR_BVR_WCR_WVR(8),
2821 DBG_BCR_BVR_WCR_WVR(9),
2822 DBG_BCR_BVR_WCR_WVR(10),
2823 DBG_BCR_BVR_WCR_WVR(11),
2824 DBG_BCR_BVR_WCR_WVR(12),
2825 DBG_BCR_BVR_WCR_WVR(13),
2826 DBG_BCR_BVR_WCR_WVR(14),
2827 DBG_BCR_BVR_WCR_WVR(15),
2828
2829 /* DBGDRAR (32bit) */
2830 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
2831
2832 DBGBXVR(0),
2833 /* DBGOSLAR */
f24adc65 2834 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
bdfb4b38
MZ
2835 DBGBXVR(1),
2836 /* DBGOSLSR */
d42e2671 2837 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
bdfb4b38
MZ
2838 DBGBXVR(2),
2839 DBGBXVR(3),
2840 /* DBGOSDLR */
2841 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
2842 DBGBXVR(4),
2843 /* DBGPRCR */
2844 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
2845 DBGBXVR(5),
2846 DBGBXVR(6),
2847 DBGBXVR(7),
2848 DBGBXVR(8),
2849 DBGBXVR(9),
2850 DBGBXVR(10),
2851 DBGBXVR(11),
2852 DBGBXVR(12),
2853 DBGBXVR(13),
2854 DBGBXVR(14),
2855 DBGBXVR(15),
2856
2857 /* DBGDSAR (32bit) */
2858 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
2859
2860 /* DBGDEVID2 */
2861 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
2862 /* DBGDEVID1 */
2863 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
2864 /* DBGDEVID */
2865 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
2866 /* DBGCLAIMSET */
2867 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
2868 /* DBGCLAIMCLR */
2869 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
2870 /* DBGAUTHSTATUS */
2871 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
72564016
MZ
2872};
2873
a9866ba0
MZ
2874/* Trapped cp14 64bit registers */
2875static const struct sys_reg_desc cp14_64_regs[] = {
bdfb4b38
MZ
2876 /* DBGDRAR (64bit) */
2877 { Op1( 0), CRm( 1), .access = trap_raz_wi },
2878
2879 /* DBGDSAR (64bit) */
2880 { Op1( 0), CRm( 2), .access = trap_raz_wi },
a9866ba0
MZ
2881};
2882
a9e192cd
AE
2883#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
2884 AA32(_map), \
2885 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
2886 .visibility = pmu_visibility
2887
051ff581
SZ
2888/* Macro to expand the PMEVCNTRn register */
2889#define PMU_PMEVCNTR(n) \
a9e192cd
AE
2890 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
2891 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
2892 .access = access_pmu_evcntr }
051ff581 2893
9feb21ac
SZ
2894/* Macro to expand the PMEVTYPERn register */
2895#define PMU_PMEVTYPER(n) \
a9e192cd
AE
2896 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
2897 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
2898 .access = access_pmu_evtyper }
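/*
 * Worked example of the index split performed by the two macros above:
 * for n = 13, ((13 >> 3) & 0x3) = 1 and (13 & 0x7) = 5, so PMEVCNTR13 is
 * CRn=14, CRm=0b1001, Op2=5 and PMEVTYPER13 is CRn=14, CRm=0b1101, Op2=5.
 */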
4d44923b
MZ
2899/*
2900 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
2901 * depending on the way they are accessed (as a 32bit or a 64bit
2902 * register).
2903 */
62a89c44 2904static const struct sys_reg_desc cp15_regs[] = {
f7f2b15c 2905 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
b1ea1d76
MZ
2906 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
2907 /* ACTLR */
2908 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
2909 /* ACTLR2 */
2910 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
2911 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
2912 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
2913 /* TTBCR */
2914 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
2915 /* TTBCR2 */
2916 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
2917 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
2918 /* DFSR */
2919 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
2920 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
2921 /* ADFSR */
2922 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
2923 /* AIFSR */
2924 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
2925 /* DFAR */
2926 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
2927 /* IFAR */
2928 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4d44923b 2929
62a89c44
MZ
2930 /*
2931 * DC{C,I,CI}SW operations:
2932 */
2933 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
2934 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
2935 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4d44923b 2936
7609c125 2937 /* PMU */
a9e192cd
AE
2938 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
2939 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
2940 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
2941 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
2942 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
2943 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
2944 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
2945 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
2946 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
2947 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
2948 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
2949 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
2950 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
2951 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
2952 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
2953 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
2954 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
46081078 2955 /* PMMIR */
a9e192cd 2956 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4d44923b 2957
b1ea1d76
MZ
2958 /* PRRR/MAIR0 */
2959 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
2960 /* NMRR/MAIR1 */
2961 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
2962 /* AMAIR0 */
2963 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
2964 /* AMAIR1 */
2965 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
db7dedd0
CD
2966
2967 /* ICC_SRE */
f7f6f2d9 2968 { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
db7dedd0 2969
b1ea1d76 2970 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
051ff581 2971
84135d3d
AP
2972 /* Arch Tmers */
2973 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
2974 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
eac137b4 2975
051ff581
SZ
2976 /* PMEVCNTRn */
2977 PMU_PMEVCNTR(0),
2978 PMU_PMEVCNTR(1),
2979 PMU_PMEVCNTR(2),
2980 PMU_PMEVCNTR(3),
2981 PMU_PMEVCNTR(4),
2982 PMU_PMEVCNTR(5),
2983 PMU_PMEVCNTR(6),
2984 PMU_PMEVCNTR(7),
2985 PMU_PMEVCNTR(8),
2986 PMU_PMEVCNTR(9),
2987 PMU_PMEVCNTR(10),
2988 PMU_PMEVCNTR(11),
2989 PMU_PMEVCNTR(12),
2990 PMU_PMEVCNTR(13),
2991 PMU_PMEVCNTR(14),
2992 PMU_PMEVCNTR(15),
2993 PMU_PMEVCNTR(16),
2994 PMU_PMEVCNTR(17),
2995 PMU_PMEVCNTR(18),
2996 PMU_PMEVCNTR(19),
2997 PMU_PMEVCNTR(20),
2998 PMU_PMEVCNTR(21),
2999 PMU_PMEVCNTR(22),
3000 PMU_PMEVCNTR(23),
3001 PMU_PMEVCNTR(24),
3002 PMU_PMEVCNTR(25),
3003 PMU_PMEVCNTR(26),
3004 PMU_PMEVCNTR(27),
3005 PMU_PMEVCNTR(28),
3006 PMU_PMEVCNTR(29),
3007 PMU_PMEVCNTR(30),
9feb21ac
SZ
3008 /* PMEVTYPERn */
3009 PMU_PMEVTYPER(0),
3010 PMU_PMEVTYPER(1),
3011 PMU_PMEVTYPER(2),
3012 PMU_PMEVTYPER(3),
3013 PMU_PMEVTYPER(4),
3014 PMU_PMEVTYPER(5),
3015 PMU_PMEVTYPER(6),
3016 PMU_PMEVTYPER(7),
3017 PMU_PMEVTYPER(8),
3018 PMU_PMEVTYPER(9),
3019 PMU_PMEVTYPER(10),
3020 PMU_PMEVTYPER(11),
3021 PMU_PMEVTYPER(12),
3022 PMU_PMEVTYPER(13),
3023 PMU_PMEVTYPER(14),
3024 PMU_PMEVTYPER(15),
3025 PMU_PMEVTYPER(16),
3026 PMU_PMEVTYPER(17),
3027 PMU_PMEVTYPER(18),
3028 PMU_PMEVTYPER(19),
3029 PMU_PMEVTYPER(20),
3030 PMU_PMEVTYPER(21),
3031 PMU_PMEVTYPER(22),
3032 PMU_PMEVTYPER(23),
3033 PMU_PMEVTYPER(24),
3034 PMU_PMEVTYPER(25),
3035 PMU_PMEVTYPER(26),
3036 PMU_PMEVTYPER(27),
3037 PMU_PMEVTYPER(28),
3038 PMU_PMEVTYPER(29),
3039 PMU_PMEVTYPER(30),
3040 /* PMCCFILTR */
a9e192cd 3041 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
f7f2b15c
AB
3042
3043 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
3044 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
bf48040c
AO
3045
3046 /* CCSIDR2 */
3047 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
3048
b1ea1d76 3049 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
a9866ba0
MZ
3050};
3051
3052static const struct sys_reg_desc cp15_64_regs[] = {
b1ea1d76 3053 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
a9e192cd 3054 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
03bd646d 3055 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
c605ee24 3056 { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
b1ea1d76 3057 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
03bd646d
MZ
3058 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
3059 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
84135d3d 3060 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
a6610435 3061 { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
7c8c5e6a
MZ
3062};
3063
f1f0c0cf
AE
3064static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
3065 bool is_32)
bb44a8db
MZ
3066{
3067 unsigned int i;
3068
3069 for (i = 0; i < n; i++) {
3070 if (!is_32 && table[i].reg && !table[i].reset) {
325031d4 3071 kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
f1f0c0cf 3072 return false;
bb44a8db
MZ
3073 }
3074
3075 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
325031d4 3076 kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
f1f0c0cf 3077 return false;
bb44a8db
MZ
3078 }
3079 }
3080
f1f0c0cf 3081 return true;
bb44a8db
MZ
3082}
3083
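/*
 * The ordering enforced here matches the "sorted ascending by Op0, Op1,
 * CRn, CRm, Op2" requirement stated above the tables: find_reg() is
 * expected to rely on that order to locate entries (e.g. via bsearch()),
 * so an out-of-order entry would make lookups silently miss.
 */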
74cc7e0c 3084int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
62a89c44
MZ
3085{
3086 kvm_inject_undefined(vcpu);
3087 return 1;
3088}
3089
e70b9522
MZ
3090static void perform_access(struct kvm_vcpu *vcpu,
3091 struct sys_reg_params *params,
3092 const struct sys_reg_desc *r)
3093{
599d79dc
MZ
3094 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
3095
7f34e409 3096 /* Check for regs disabled by runtime config */
01fe5ace 3097 if (sysreg_hidden(vcpu, r)) {
7f34e409
DM
3098 kvm_inject_undefined(vcpu);
3099 return;
3100 }
3101
e70b9522
MZ
3102 /*
3103 * Not having an accessor means that we have configured a trap
3104 * that we don't know how to handle. This certainly qualifies
3105 * as a gross bug that should be fixed right away.
3106 */
3107 BUG_ON(!r->access);
3108
3109 /* Skip instruction if instructed so */
3110 if (likely(r->access(vcpu, params, r)))
cdb5e02e 3111 kvm_incr_pc(vcpu);
e70b9522
MZ
3112}
3113
72564016
MZ
3114/*
3115 * emulate_cp -- tries to match a sys_reg access in a handling table, and
3116 * call the corresponding trap handler.
3117 *
3118 * @params: pointer to the descriptor of the access
3119 * @table: array of trap descriptors
3120 * @num: size of the trap descriptor array
3121 *
001bb819 3122 * Return true if the access has been handled, false if not.
72564016 3123 */
001bb819
OU
3124static bool emulate_cp(struct kvm_vcpu *vcpu,
3125 struct sys_reg_params *params,
3126 const struct sys_reg_desc *table,
3127 size_t num)
62a89c44 3128{
72564016 3129 const struct sys_reg_desc *r;
62a89c44 3130
72564016 3131 if (!table)
001bb819 3132 return false; /* Not handled */
62a89c44 3133
62a89c44 3134 r = find_reg(params, table, num);
62a89c44 3135
72564016 3136 if (r) {
e70b9522 3137 perform_access(vcpu, params, r);
001bb819 3138 return true;
72564016
MZ
3139 }
3140
3141 /* Not handled */
001bb819 3142 return false;
72564016
MZ
3143}
3144
3145static void unhandled_cp_access(struct kvm_vcpu *vcpu,
3146 struct sys_reg_params *params)
3147{
3a949f4c 3148 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
40c4f8d2 3149 int cp = -1;
72564016 3150
3a949f4c 3151 switch (esr_ec) {
c6d01a94
MR
3152 case ESR_ELx_EC_CP15_32:
3153 case ESR_ELx_EC_CP15_64:
72564016
MZ
3154 cp = 15;
3155 break;
c6d01a94
MR
3156 case ESR_ELx_EC_CP14_MR:
3157 case ESR_ELx_EC_CP14_64:
72564016
MZ
3158 cp = 14;
3159 break;
3160 default:
40c4f8d2 3161 WARN_ON(1);
62a89c44
MZ
3162 }
3163
bf4b96bb
MR
3164 print_sys_reg_msg(params,
3165 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
3166 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
62a89c44
MZ
3167 kvm_inject_undefined(vcpu);
3168}
3169
3170/**
7769db90 3171 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
62a89c44
MZ
3172 * @vcpu: The VCPU pointer
3174 */
72564016
MZ
3175static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
3176 const struct sys_reg_desc *global,
dcaffa7b 3177 size_t nr_global)
62a89c44
MZ
3178{
3179 struct sys_reg_params params;
0b12620f 3180 u64 esr = kvm_vcpu_get_esr(vcpu);
c667186f 3181 int Rt = kvm_vcpu_sys_get_rt(vcpu);
3a949f4c 3182 int Rt2 = (esr >> 10) & 0x1f;
62a89c44 3183
3a949f4c
GS
3184 params.CRm = (esr >> 1) & 0xf;
3185 params.is_write = ((esr & 1) == 0);
62a89c44
MZ
3186
3187 params.Op0 = 0;
3a949f4c 3188 params.Op1 = (esr >> 16) & 0xf;
62a89c44
MZ
3189 params.Op2 = 0;
3190 params.CRn = 0;
3191
3192 /*
2ec5be3d 3193 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
62a89c44
MZ
3194 * backends between AArch32 and AArch64, we get away with it.
3195 */
3196 if (params.is_write) {
2ec5be3d
PF
3197 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
3198 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
62a89c44
MZ
3199 }
3200
b6b7a806 3201 /*
dcaffa7b 3202	 * If the table contains a handler, let it handle the access and,
b6b7a806
MZ
 3203	 * for a read, split the result back between the two registers before
 3204	 * returning with success.
3205 */
001bb819 3206 if (emulate_cp(vcpu, &params, global, nr_global)) {
b6b7a806
MZ
3207 /* Split up the value between registers for the read side */
3208 if (!params.is_write) {
3209 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
3210 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
3211 }
62a89c44 3212
b6b7a806 3213 return 1;
62a89c44
MZ
3214 }
3215
b6b7a806 3216 unhandled_cp_access(vcpu, &params);
62a89c44
MZ
3217 return 1;
3218}
3219
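/*
 * Worked example of the Rt/Rt2 packing above (illustration only): for an
 * MCRR with Rt = 0x11223344 and Rt2 = 0x55667788, params.regval becomes
 * 0x5566778811223344. On the read side, lower_32_bits()/upper_32_bits()
 * split the value back into the two GPRs.
 */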
e6519766
OU
3220static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
3221
9369bc5c
OU
3222/*
3223 * The CP10 ID registers are architecturally mapped to AArch64 feature
3224 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
3225 * from AArch32.
3226 */
ee87a9bd 3227static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
9369bc5c
OU
3228{
3229 u8 reg_id = (esr >> 10) & 0xf;
3230 bool valid;
3231
3232 params->is_write = ((esr & 1) == 0);
3233 params->Op0 = 3;
3234 params->Op1 = 0;
3235 params->CRn = 0;
3236 params->CRm = 3;
3237
3238 /* CP10 ID registers are read-only */
3239 valid = !params->is_write;
3240
3241 switch (reg_id) {
3242 /* MVFR0 */
3243 case 0b0111:
3244 params->Op2 = 0;
3245 break;
3246 /* MVFR1 */
3247 case 0b0110:
3248 params->Op2 = 1;
3249 break;
3250 /* MVFR2 */
3251 case 0b0101:
3252 params->Op2 = 2;
3253 break;
3254 default:
3255 valid = false;
3256 }
3257
3258 if (valid)
3259 return true;
3260
3261 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
3262 params->is_write ? "write" : "read", reg_id);
3263 return false;
3264}
3265
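/*
 * Worked example of the CP10 remapping above: a VMRS read of MVFR1
 * (reg_id 0b0110) is rewritten as Op0=3, Op1=0, CRn=0, CRm=3, Op2=1,
 * i.e. the AArch64 MVFR1_EL1 encoding handled by the sys_reg table.
 */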
3266/**
3267 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
3268 * VFP Register' from AArch32.
3269 * @vcpu: The vCPU pointer
3270 *
3271 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
3272 * Work out the correct AArch64 system register encoding and reroute to the
3273 * AArch64 system register emulation.
3274 */
3275int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
3276{
3277 int Rt = kvm_vcpu_sys_get_rt(vcpu);
ee87a9bd 3278 u64 esr = kvm_vcpu_get_esr(vcpu);
9369bc5c
OU
3279 struct sys_reg_params params;
3280
3281 /* UNDEF on any unhandled register access */
3282 if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
3283 kvm_inject_undefined(vcpu);
3284 return 1;
3285 }
3286
3287 if (emulate_sys_reg(vcpu, &params))
3288 vcpu_set_reg(vcpu, Rt, params.regval);
3289
3290 return 1;
3291}
3292
e6519766
OU
3293/**
3294 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
3295 * CRn=0, which corresponds to the AArch32 feature
3296 * registers.
3297 * @vcpu: the vCPU pointer
3298 * @params: the system register access parameters.
3299 *
3300 * Our cp15 system register tables do not enumerate the AArch32 feature
3301 * registers. Conveniently, our AArch64 table does, and the AArch32 system
3302 * register encoding can be trivially remapped into the AArch64 for the feature
3303 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
3304 *
3305 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
3306 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
3307 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
3308 * treat undefined registers in this range as RAZ.
3309 */
3310static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
3311 struct sys_reg_params *params)
3312{
3313 int Rt = kvm_vcpu_sys_get_rt(vcpu);
3314
3315 /* Treat impossible writes to RO registers as UNDEFINED */
3316 if (params->is_write) {
3317 unhandled_cp_access(vcpu, params);
3318 return 1;
3319 }
3320
3321 params->Op0 = 3;
3322
3323 /*
3324 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
3325 * Avoid conflicting with future expansion of AArch64 feature registers
3326 * and simply treat them as RAZ here.
3327 */
3328 if (params->CRm > 3)
3329 params->regval = 0;
3330 else if (!emulate_sys_reg(vcpu, params))
3331 return 1;
3332
3333 vcpu_set_reg(vcpu, Rt, params->regval);
3334 return 1;
3335}
3336
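/*
 * Worked example of the remapping described above: an AArch32 read of
 * ID_ISAR0 (coproc=0b1111, Op1=0, CRn=0, CRm=2, Op2=0) gains Op0=3 and
 * becomes the AArch64 ID_ISAR0_EL1 encoding (3, 0, 0, 2, 0), which is
 * already handled by the AArch64 sys_reg table.
 */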
62a89c44 3337/**
7769db90 3338 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
62a89c44
MZ
3339 * @vcpu: The VCPU pointer
3340 * @run: The kvm_run struct
3341 */
72564016 3342static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
e6519766 3343 struct sys_reg_params *params,
72564016 3344 const struct sys_reg_desc *global,
dcaffa7b 3345 size_t nr_global)
62a89c44 3346{
c667186f 3347 int Rt = kvm_vcpu_sys_get_rt(vcpu);
62a89c44 3348
e6519766 3349 params->regval = vcpu_get_reg(vcpu, Rt);
62a89c44 3350
e6519766
OU
3351 if (emulate_cp(vcpu, params, global, nr_global)) {
3352 if (!params->is_write)
3353 vcpu_set_reg(vcpu, Rt, params->regval);
72564016 3354 return 1;
2ec5be3d 3355 }
72564016 3356
e6519766 3357 unhandled_cp_access(vcpu, params);
62a89c44
MZ
3358 return 1;
3359}
3360
74cc7e0c 3361int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
72564016 3362{
dcaffa7b 3363 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
72564016
MZ
3364}
3365
74cc7e0c 3366int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
72564016 3367{
e6519766
OU
3368 struct sys_reg_params params;
3369
3370 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3371
3372 /*
3373 * Certain AArch32 ID registers are handled by rerouting to the AArch64
3374 * system register table. Registers in the ID range where CRm=0 are
3375 * excluded from this scheme as they do not trivially map into AArch64
3376 * system register encodings.
3377 */
3378 if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
3379 return kvm_emulate_cp15_id_reg(vcpu, &params);
3380
3381 return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
72564016
MZ
3382}
3383
74cc7e0c 3384int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
72564016 3385{
dcaffa7b 3386 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
72564016
MZ
3387}
3388
74cc7e0c 3389int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
72564016 3390{
e6519766
OU
3391 struct sys_reg_params params;
3392
3393 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3394
3395 return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
72564016
MZ
3396}
3397
28eda7b5
OU
3398/**
3399 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
3400 * @vcpu: The VCPU pointer
3401 * @params: Decoded system register parameters
3402 *
3403 * Return: true if a matching register was found and the access was emulated,
 * false otherwise (in which case an UNDEF is injected into the guest).
3404 */
3405static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
cc5f84fb 3406 struct sys_reg_params *params)
7c8c5e6a 3407{
dcaffa7b 3408 const struct sys_reg_desc *r;
7c8c5e6a 3409
dcaffa7b 3410 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
7c8c5e6a 3411 if (likely(r)) {
e70b9522 3412 perform_access(vcpu, params, r);
28eda7b5
OU
3413 return true;
3414 }
3415
cc5f84fb
MZ
3416 print_sys_reg_msg(params,
3417 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
3418 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
3419 kvm_inject_undefined(vcpu);
89bc63fa 3420
cc5f84fb 3421 return false;
89bc63fa
MZ
3422}
3423
47334146
JZ
3424static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
3425{
3426 const struct sys_reg_desc *idreg = first_idreg;
3427 u32 id = reg_to_encoding(idreg);
3428 struct kvm *kvm = vcpu->kvm;
3429
3430 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
3431 return;
3432
3433 lockdep_assert_held(&kvm->arch.config_lock);
3434
3435 /* Initialize all idregs */
3436 while (is_id_reg(id)) {
3437 IDREG(kvm, id) = idreg->reset(vcpu, idreg);
3438
3439 idreg++;
3440 id = reg_to_encoding(idreg);
3441 }
3442
3443 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
3444}
3445
750ed566
JM
3446/**
3447 * kvm_reset_sys_regs - sets system registers to reset value
3448 * @vcpu: The VCPU pointer
3449 *
3450 * This function walks the sys_reg_descs table above and sets the registers on the
3451 * virtual CPU struct to their architecturally defined reset values.
3452 */
3453void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
7c8c5e6a
MZ
3454{
3455 unsigned long i;
3456
47334146
JZ
3457 kvm_reset_id_regs(vcpu);
3458
3459 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
3460 const struct sys_reg_desc *r = &sys_reg_descs[i];
3461
3462 if (is_id_reg(reg_to_encoding(r)))
3463 continue;
3464
3465 if (r->reset)
3466 r->reset(vcpu, r);
3467 }
7c8c5e6a
MZ
3468}
3469
3470/**
89bc63fa
MZ
3471 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
3472 * trap taken during guest execution
7c8c5e6a 3473 * @vcpu: The VCPU pointer
7c8c5e6a 3474 */
74cc7e0c 3475int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
7c8c5e6a 3476{
cc5f84fb 3477 const struct sys_reg_desc *desc = NULL;
7c8c5e6a 3478 struct sys_reg_params params;
3a949f4c 3479 unsigned long esr = kvm_vcpu_get_esr(vcpu);
c667186f 3480 int Rt = kvm_vcpu_sys_get_rt(vcpu);
cc5f84fb 3481 int sr_idx;
7c8c5e6a 3482
eef8c85a
AB
3483 trace_kvm_handle_sys_reg(esr);
3484
085eabaa 3485 if (triage_sysreg_trap(vcpu, &sr_idx))
e58ec47b
MZ
3486 return 1;
3487
f76f89e2 3488 params = esr_sys64_to_params(esr);
2ec5be3d 3489 params.regval = vcpu_get_reg(vcpu, Rt);
7c8c5e6a 3490
89bc63fa 3491 /* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
cc5f84fb
MZ
3492 if (params.Op0 == 2 || params.Op0 == 3)
3493 desc = &sys_reg_descs[sr_idx];
3494 else
3495 desc = &sys_insn_descs[sr_idx];
89bc63fa 3496
cc5f84fb 3497 perform_access(vcpu, &params, desc);
89bc63fa 3498
cc5f84fb
MZ
3499 /* Read from system register? */
3500 if (!params.is_write &&
3501 (params.Op0 == 2 || params.Op0 == 3))
3502 vcpu_set_reg(vcpu, Rt, params.regval);
2ec5be3d 3503
cc5f84fb 3504 return 1;
7c8c5e6a
MZ
3505}
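
/*
 * Editorial note (not part of the kernel source): for a trapped MRS/MSR, the
 * ISS field of ESR_EL2 carries the Op0/Op1/CRn/CRm/Op2 encoding of the
 * accessed register, the transfer register Rt, and the access direction;
 * esr_sys64_to_params() unpacks the encoding and direction, while
 * kvm_vcpu_sys_get_rt() extracts Rt.
 */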
3506
3507/******************************************************************************
3508 * Userspace API
3509 *****************************************************************************/
3510
3511static bool index_to_params(u64 id, struct sys_reg_params *params)
3512{
3513 switch (id & KVM_REG_SIZE_MASK) {
3514 case KVM_REG_SIZE_U64:
3515 /* Any unused index bits means it's not valid. */
3516 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
3517 | KVM_REG_ARM_COPROC_MASK
3518 | KVM_REG_ARM64_SYSREG_OP0_MASK
3519 | KVM_REG_ARM64_SYSREG_OP1_MASK
3520 | KVM_REG_ARM64_SYSREG_CRN_MASK
3521 | KVM_REG_ARM64_SYSREG_CRM_MASK
3522 | KVM_REG_ARM64_SYSREG_OP2_MASK))
3523 return false;
3524 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
3525 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
3526 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
3527 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
3528 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
3529 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
3530 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
3531 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
3532 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
3533 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
3534 return true;
3535 default:
3536 return false;
3537 }
3538}
3539
da8d120f
MZ
3540const struct sys_reg_desc *get_reg_by_id(u64 id,
3541 const struct sys_reg_desc table[],
3542 unsigned int num)
4b927b94 3543{
da8d120f
MZ
3544 struct sys_reg_params params;
3545
3546 if (!index_to_params(id, &params))
4b927b94
VK
3547 return NULL;
3548
da8d120f 3549 return find_reg(&params, table, num);
4b927b94
VK
3550}
3551
7c8c5e6a 3552/* Decode an index value, and find the sys_reg_desc entry. */
ba23aec9
MZ
3553static const struct sys_reg_desc *
3554id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
3555 const struct sys_reg_desc table[], unsigned int num)
3556
7c8c5e6a 3557{
dcaffa7b 3558 const struct sys_reg_desc *r;
7c8c5e6a
MZ
3559
3560 /* We only do sys_reg for now. */
3561 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
3562 return NULL;
3563
ba23aec9 3564 r = get_reg_by_id(id, table, num);
7c8c5e6a 3565
93390c0a 3566 /* Not saved in the sys_reg array and not otherwise accessible? */
ba23aec9 3567 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
7c8c5e6a
MZ
3568 r = NULL;
3569
3570 return r;
3571}
3572
3573/*
3574 * These are the invariant system registers: we let the guest see the
3575 * host versions of these, so they're part of the guest state.
3576 *
3577 * A future CPU may provide a mechanism to present different values to
3578 * the guest, or a future kvm may trap them.
3579 */
3580
3581#define FUNCTION_INVARIANT(reg) \
d86cde6e 3582 static u64 get_##reg(struct kvm_vcpu *v, \
7c8c5e6a
MZ
3583 const struct sys_reg_desc *r) \
3584 { \
1f3d8699 3585 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
d86cde6e 3586 return ((struct sys_reg_desc *)r)->val; \
7c8c5e6a
MZ
3587 }
3588
3589FUNCTION_INVARIANT(midr_el1)
7c8c5e6a 3590FUNCTION_INVARIANT(revidr_el1)
7c8c5e6a
MZ
3591FUNCTION_INVARIANT(aidr_el1)
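
/*
 * Editorial note (not part of the kernel source): each invocation above
 * expands to a tiny accessor; e.g. FUNCTION_INVARIANT(midr_el1) becomes
 * get_midr_el1(), which latches the host's MIDR_EL1 value into r->val and
 * returns it.
 */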
3592
d86cde6e 3593static u64 get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
f7f2b15c
AB
3594{
3595 ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
d86cde6e 3596 return ((struct sys_reg_desc *)r)->val;
f7f2b15c
AB
3597}
3598
7c8c5e6a 3599/* ->val is filled in by kvm_sys_reg_table_init() */
8d20bd63 3600static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
0d449541
MR
3601 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
3602 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
0d449541
MR
3603 { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
3604 { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
7c8c5e6a
MZ
3605};
3606
5a420ed9 3607static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
7c8c5e6a 3608{
7c8c5e6a
MZ
3609 const struct sys_reg_desc *r;
3610
da8d120f
MZ
3611 r = get_reg_by_id(id, invariant_sys_regs,
3612 ARRAY_SIZE(invariant_sys_regs));
7c8c5e6a
MZ
3613 if (!r)
3614 return -ENOENT;
3615
5a420ed9 3616 return put_user(r->val, uaddr);
7c8c5e6a
MZ
3617}
3618
5a420ed9 3619static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
7c8c5e6a 3620{
7c8c5e6a 3621 const struct sys_reg_desc *r;
5a420ed9 3622 u64 val;
7c8c5e6a 3623
da8d120f
MZ
3624 r = get_reg_by_id(id, invariant_sys_regs,
3625 ARRAY_SIZE(invariant_sys_regs));
7c8c5e6a
MZ
3626 if (!r)
3627 return -ENOENT;
3628
5a420ed9
MZ
3629 if (get_user(val, uaddr))
3630 return -EFAULT;
7c8c5e6a
MZ
3631
3632 /* This is what we mean by invariant: you can't change it. */
3633 if (r->val != val)
3634 return -EINVAL;
3635
3636 return 0;
3637}
3638
7af0c253 3639static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
7c8c5e6a
MZ
3640{
3641 u32 val;
3642 u32 __user *uval = uaddr;
3643
3644 /* Fail if we have unknown bits set. */
3645 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
3646 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
3647 return -ENOENT;
3648
3649 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
3650 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
3651 if (KVM_REG_SIZE(id) != 4)
3652 return -ENOENT;
3653 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3654 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
7af0c253 3655 if (val >= CSSELR_MAX)
7c8c5e6a
MZ
3656 return -ENOENT;
3657
7af0c253 3658 return put_user(get_ccsidr(vcpu, val), uval);
7c8c5e6a
MZ
3659 default:
3660 return -ENOENT;
3661 }
3662}
3663
7af0c253 3664static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
7c8c5e6a
MZ
3665{
3666 u32 val, newval;
3667 u32 __user *uval = uaddr;
3668
3669 /* Fail if we have unknown bits set. */
3670 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
3671 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
3672 return -ENOENT;
3673
3674 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
3675 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
3676 if (KVM_REG_SIZE(id) != 4)
3677 return -ENOENT;
3678 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
3679 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
7af0c253 3680 if (val >= CSSELR_MAX)
7c8c5e6a
MZ
3681 return -ENOENT;
3682
3683 if (get_user(newval, uval))
3684 return -EFAULT;
3685
7af0c253 3686 return set_ccsidr(vcpu, val, newval);
7c8c5e6a
MZ
3687 default:
3688 return -ENOENT;
3689 }
3690}
3691
ba23aec9
MZ
3692int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3693 const struct sys_reg_desc table[], unsigned int num)
7c8c5e6a 3694{
978ceeb3 3695 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
7c8c5e6a 3696 const struct sys_reg_desc *r;
978ceeb3
MZ
3697 u64 val;
3698 int ret;
ba23aec9
MZ
3699
3700 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
e6b367db 3701 if (!r || sysreg_hidden_user(vcpu, r))
ba23aec9
MZ
3702 return -ENOENT;
3703
978ceeb3
MZ
3704 if (r->get_user) {
3705 ret = (r->get_user)(vcpu, r, &val);
3706 } else {
3707 val = __vcpu_sys_reg(vcpu, r->reg);
3708 ret = 0;
3709 }
3710
3711 if (!ret)
3712 ret = put_user(val, uaddr);
ba23aec9 3713
978ceeb3 3714 return ret;
ba23aec9
MZ
3715}
3716
3717int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
3718{
7c8c5e6a 3719 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1deeffb5 3720 int err;
7c8c5e6a
MZ
3721
3722 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
7af0c253 3723 return demux_c15_get(vcpu, reg->id, uaddr);
7c8c5e6a 3724
1deeffb5
MZ
3725 err = get_invariant_sys_reg(reg->id, uaddr);
3726 if (err != -ENOENT)
3727 return err;
7c8c5e6a 3728
ba23aec9
MZ
3729 return kvm_sys_reg_get_user(vcpu, reg,
3730 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3731}
7c8c5e6a 3732
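/*
 * Hypothetical userspace usage sketch (editorial addition, not part of this
 * file): reading a single system register through the ONE_REG interface.
 * The vcpu_fd variable and the register index are assumptions for the
 * example.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = <index obtained from KVM_GET_REG_LIST>,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */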
ba23aec9
MZ
3733int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
3734 const struct sys_reg_desc table[], unsigned int num)
3735{
978ceeb3 3736 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
ba23aec9 3737 const struct sys_reg_desc *r;
978ceeb3
MZ
3738 u64 val;
3739 int ret;
3740
3741 if (get_user(val, uaddr))
3742 return -EFAULT;
ba23aec9
MZ
3743
3744 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
e6b367db 3745 if (!r || sysreg_hidden_user(vcpu, r))
7f34e409
DM
3746 return -ENOENT;
3747
4de06e4c
OU
3748 if (sysreg_user_write_ignore(vcpu, r))
3749 return 0;
3750
978ceeb3
MZ
3751 if (r->set_user) {
3752 ret = (r->set_user)(vcpu, r, val);
3753 } else {
3754 __vcpu_sys_reg(vcpu, r->reg) = val;
3755 ret = 0;
3756 }
84e690bf 3757
978ceeb3 3758 return ret;
7c8c5e6a
MZ
3759}
3760
3761int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
3762{
7c8c5e6a 3763 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
1deeffb5 3764 int err;
7c8c5e6a
MZ
3765
3766 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
7af0c253 3767 return demux_c15_set(vcpu, reg->id, uaddr);
7c8c5e6a 3768
1deeffb5
MZ
3769 err = set_invariant_sys_reg(reg->id, uaddr);
3770 if (err != -ENOENT)
3771 return err;
84e690bf 3772
ba23aec9
MZ
3773 return kvm_sys_reg_set_user(vcpu, reg,
3774 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
7c8c5e6a
MZ
3775}
3776
3777static unsigned int num_demux_regs(void)
3778{
7af0c253 3779 return CSSELR_MAX;
7c8c5e6a
MZ
3780}
3781
3782static int write_demux_regids(u64 __user *uindices)
3783{
efd48cea 3784 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
7c8c5e6a
MZ
3785 unsigned int i;
3786
3787 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
3788 for (i = 0; i < CSSELR_MAX; i++) {
7c8c5e6a
MZ
3789 if (put_user(val | i, uindices))
3790 return -EFAULT;
3791 uindices++;
3792 }
3793 return 0;
3794}
3795
3796static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
3797{
3798 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
3799 KVM_REG_ARM64_SYSREG |
3800 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
3801 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
3802 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
3803 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
3804 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
3805}
3806
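/*
 * Editorial illustration (not part of the upstream file): sys_reg_to_index()
 * and index_to_params() are inverses over the Op0/Op1/CRn/CRm/Op2 fields.
 * The helper below is a hypothetical sketch of that property and is not
 * used anywhere in the kernel.
 */
static bool __maybe_unused sys_reg_index_roundtrips(const struct sys_reg_desc *reg)
{
	struct sys_reg_params params;

	/* Encode to a userspace index, then decode it back. */
	if (!index_to_params(sys_reg_to_index(reg), &params))
		return false;

	return params.Op0 == reg->Op0 && params.Op1 == reg->Op1 &&
	       params.CRn == reg->CRn && params.CRm == reg->CRm &&
	       params.Op2 == reg->Op2;
}
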
3807static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
3808{
3809 if (!*uind)
3810 return true;
3811
3812 if (put_user(sys_reg_to_index(reg), *uind))
3813 return false;
3814
3815 (*uind)++;
3816 return true;
3817}
3818
7f34e409
DM
3819static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
3820 const struct sys_reg_desc *rd,
93390c0a
DM
3821 u64 __user **uind,
3822 unsigned int *total)
3823{
3824 /*
3825 * Ignore registers we trap but don't save,
3826 * and for which no custom user accessor is provided.
3827 */
3828 if (!(rd->reg || rd->get_user))
3829 return 0;
3830
e6b367db 3831 if (sysreg_hidden_user(vcpu, rd))
7f34e409
DM
3832 return 0;
3833
93390c0a
DM
3834 if (!copy_reg_to_user(rd, uind))
3835 return -EFAULT;
3836
3837 (*total)++;
3838 return 0;
3839}
3840
7c8c5e6a
MZ
3841/* Assumed ordered tables, see kvm_sys_reg_table_init. */
3842static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
3843{
dcaffa7b 3844 const struct sys_reg_desc *i2, *end2;
7c8c5e6a 3845 unsigned int total = 0;
93390c0a 3846 int err;
7c8c5e6a 3847
7c8c5e6a
MZ
3848 i2 = sys_reg_descs;
3849 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
3850
dcaffa7b
JM
3851 while (i2 != end2) {
3852 err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
93390c0a
DM
3853 if (err)
3854 return err;
7c8c5e6a
MZ
3855 }
3856 return total;
3857}
3858
3859unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
3860{
3861 return ARRAY_SIZE(invariant_sys_regs)
3862 + num_demux_regs()
3863 + walk_sys_regs(vcpu, (u64 __user *)NULL);
3864}
3865
3866int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
3867{
3868 unsigned int i;
3869 int err;
3870
3871 /* First, give them all the invariant registers' indices. */
3872 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
3873 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
3874 return -EFAULT;
3875 uindices++;
3876 }
3877
3878 err = walk_sys_regs(vcpu, uindices);
3879 if (err < 0)
3880 return err;
3881 uindices += err;
3882
3883 return write_demux_regids(uindices);
3884}
3885
3f9cd0ca
JZ
3886#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \
3887 KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \
3888 sys_reg_Op1(r), \
3889 sys_reg_CRn(r), \
3890 sys_reg_CRm(r), \
3891 sys_reg_Op2(r))
3892
3893static bool is_feature_id_reg(u32 encoding)
3894{
3895 return (sys_reg_Op0(encoding) == 3 &&
3896 (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
3897 sys_reg_CRn(encoding) == 0 &&
3898 sys_reg_CRm(encoding) <= 7);
3899}
3900
3901int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
3902{
3903 const void *zero_page = page_to_virt(ZERO_PAGE(0));
3904 u64 __user *masks = (u64 __user *)range->addr;
3905
3906 /* Only the feature ID range is supported, and reserved[] must be all zero. */
3907 if (range->range ||
3908 memcmp(range->reserved, zero_page, sizeof(range->reserved)))
3909 return -EINVAL;
3910
3911 /* Wipe the whole thing first */
3912 if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
3913 return -EFAULT;
3914
3915 for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
3916 const struct sys_reg_desc *reg = &sys_reg_descs[i];
3917 u32 encoding = reg_to_encoding(reg);
3918 u64 val;
3919
3920 if (!is_feature_id_reg(encoding) || !reg->set_user)
3921 continue;
3922
3923 /*
3924 * For ID registers, we return the writable mask. Other feature
3925 * registers return a full 64bit mask. That's not necessarily
3926 * compliant with a given revision of the architecture, but the
3927 * RES0/RES1 definitions allow us to do that.
3928 */
3929 if (is_id_reg(encoding)) {
3930 if (!reg->val ||
3931 (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0()))
3932 continue;
3933 val = reg->val;
3934 } else {
3935 val = ~0UL;
3936 }
3937
3938 if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
3939 return -EFAULT;
3940 }
3941
3942 return 0;
3943}
3944
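/*
 * Hypothetical userspace usage sketch (editorial addition, not part of this
 * file): the masks written above are retrieved with the
 * KVM_ARM_GET_REG_WRITABLE_MASKS VM ioctl; the vm_fd variable is an
 * assumption for the example.
 *
 *	__u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 *	struct reg_mask_range range = {
 *		.addr  = (__u64)masks,
 *		.range = 0,		// the feature ID range
 *	};
 *	ioctl(vm_fd, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 */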
c5bac1ef
MZ
3945void kvm_init_sysreg(struct kvm_vcpu *vcpu)
3946{
3947 struct kvm *kvm = vcpu->kvm;
3948
3949 mutex_lock(&kvm->arch.config_lock);
3950
8ecdccb9
MZ
3951 /*
3952 * In the absence of FGT, we cannot independently trap TLBI
3953 * Range instructions. This isn't great, but trapping all
3954 * TLBIs would be far worse. Live with it...
3955 */
3956 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3957 vcpu->arch.hcr_el2 |= HCR_TTLBOS;
3958
c5bac1ef
MZ
3959 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
3960 goto out;
3961
3962 kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
3963 HFGxTR_EL2_nMAIR2_EL1 |
3964 HFGxTR_EL2_nS2POR_EL1 |
3965 HFGxTR_EL2_nPOR_EL1 |
3966 HFGxTR_EL2_nPOR_EL0 |
3967 HFGxTR_EL2_nACCDATA_EL1 |
3968 HFGxTR_EL2_nSMPRI_EL1_MASK |
3969 HFGxTR_EL2_nTPIDR2_EL0_MASK);
3970
8ecdccb9
MZ
3971 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3972 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
3973 HFGITR_EL2_TLBIRVALE1OS |
3974 HFGITR_EL2_TLBIRVAAE1OS |
3975 HFGITR_EL2_TLBIRVAE1OS |
3976 HFGITR_EL2_TLBIVAALE1OS |
3977 HFGITR_EL2_TLBIVALE1OS |
3978 HFGITR_EL2_TLBIVAAE1OS |
3979 HFGITR_EL2_TLBIASIDE1OS |
3980 HFGITR_EL2_TLBIVAE1OS |
3981 HFGITR_EL2_TLBIVMALLE1OS);
3982
3983 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3984 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
3985 HFGITR_EL2_TLBIRVALE1 |
3986 HFGITR_EL2_TLBIRVAAE1 |
3987 HFGITR_EL2_TLBIRVAE1 |
3988 HFGITR_EL2_TLBIRVAALE1IS|
3989 HFGITR_EL2_TLBIRVALE1IS |
3990 HFGITR_EL2_TLBIRVAAE1IS |
3991 HFGITR_EL2_TLBIRVAE1IS |
3992 HFGITR_EL2_TLBIRVAALE1OS|
3993 HFGITR_EL2_TLBIRVALE1OS |
3994 HFGITR_EL2_TLBIRVAAE1OS |
3995 HFGITR_EL2_TLBIRVAE1OS);
3996
58627b72
MZ
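	/* PIR_EL1 and PIRE0_EL1 accesses UNDEF when S1PIE isn't advertised */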
3997 if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
3998 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
3999 HFGxTR_EL2_nPIR_EL1);
4000
c5bac1ef
MZ
4001 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
4002out:
4003 mutex_unlock(&kvm->arch.config_lock);
4004}
4005
8d20bd63 4006int __init kvm_sys_reg_table_init(void)
7c8c5e6a 4007{
47334146 4008 struct sys_reg_params params;
f1f0c0cf 4009 bool valid = true;
7c8c5e6a 4010 unsigned int i;
19f3e7ea 4011 int ret = 0;
7c8c5e6a
MZ
4012
4013 /* Make sure tables are unique and in order. */
f1f0c0cf
AE
4014 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
4015 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
4016 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
4017 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
4018 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
4019 valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
89bc63fa 4020 valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
f1f0c0cf
AE
4021
4022 if (!valid)
4023 return -EINVAL;
7c8c5e6a
MZ
4024
4025 /* We abuse the reset function to overwrite the table itself. */
4026 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
4027 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
4028
47334146
JZ
4029 /* Find the first idreg (SYS_ID_PFR0_EL1) in sys_reg_descs. */
4030 params = encoding_to_params(SYS_ID_PFR0_EL1);
4031 first_idreg = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4032 if (!first_idreg)
4033 return -EINVAL;
4034
19f3e7ea
MZ
4035 ret = populate_nv_trap_config();
4036
4037 for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
4038 ret = populate_sysreg_config(sys_reg_descs + i, i);
4039
4040 for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
4041 ret = populate_sysreg_config(sys_insn_descs + i, i);
4042
4043 return ret;
7c8c5e6a 4044}