KVM: arm64: Change 32-bit handling of VM system registers
[linux-2.6-block.git] / arch / arm64 / kvm / sys_regs.c
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

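/*
 * Background (illustrative): CSSELR_EL1 selects which cache a
 * subsequent CCSIDR_EL1 read describes: bit 0 picks instruction vs.
 * data/unified, bits [3:1] pick the level. E.g. csselr == 0 reads the
 * L1 data/unified cache geometry and csselr == 1 the L1 instruction
 * cache, which is why the write/isb/read sequence above must not be
 * interrupted between the select and the read.
 */
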
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}
	vcpu_sys_reg(vcpu, reg) = val;

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

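/*
 * Worked example of the 32bit folding above (illustrative values):
 * with the 32bit copro layout in kvm_host.h, a pair of AArch32
 * registers shares one 64bit sys_reg slot. An AArch32 write of 0x1234
 * to a register with an odd r->reg lands in the upper half of slot
 * reg = r->reg / 2:
 *
 *	val = (0x1234ULL << 32) | lower_32_bits(old);
 *
 * while an even r->reg replaces the lower half instead. A 64bit (or
 * non-AArch32) write simply stores p->regval unmodified.
 */
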
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

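/*
 * Illustration of the masking above (made-up values): with
 * *dbg_reg == 0xffffffff00000000, a 32bit write of 0x1234 yields
 *
 *	val = 0x1234 | ((*dbg_reg >> 32) << 32);  /- 0xffffffff00001234 -/
 *
 * and a 32bit read through dbg_to_reg() returns just 0x1234.
 */
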
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}

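/*
 * Example of the affinity packing above (illustrative): vcpu_id 20
 * (0b10100) gives Aff0 = 4 (20 & 0xf) and Aff1 = 1 (20 >> 4), i.e. an
 * MPIDR_EL1 of (1ULL << 31) | (1 << MPIDR_LEVEL_SHIFT(1)) | 4. At most
 * 16 vcpus share an Aff0 group, matching the ICC_SGIxR target list.
 */
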
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

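/*
 * Note (illustrative): 0xdecafbad is just an arbitrary pattern
 * standing in for the architecturally UNKNOWN reset value of the
 * writable PMCR_EL0 bits; the only deliberate part is clearing
 * PMCR_EL0.E so that the guest PMU starts disabled.
 */
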
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

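/*
 * Usage sketch (illustrative): a guest EL0 read of the cycle counter
 * goes through pmu_access_cycle_counter_el0_disabled(), which permits
 * the access if PMUSERENR_EL0.CR or PMUSERENR_EL0.EN is set (or the
 * vcpu is privileged), and otherwise injects an UNDEF and makes the
 * caller bail out by returning false.
 */
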
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

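/*
 * Encoding walk-through (illustrative): an access to PMEVCNTR10_EL0
 * traps with CRn == 14, CRm == 0b1001 and Op2 == 0b010, so
 * (r->CRm & 12) == 8 and idx = ((1 & 3) << 3) | 2 == 10. The
 * PMXEVCNTR_EL0 path takes idx from PMSELR_EL0.SEL instead.
 */
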
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
						    & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

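/*
 * For instance (illustrative), PMU_PMEVCNTR_EL0(2) expands to a
 * descriptor that traps PMEVCNTR2_EL0 into access_pmu_evcntr() and
 * resets the backing storage at PMEVCNTR0_EL0 + 2 to an UNKNOWN value.
 */
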
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}

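/*
 * The TVAL arithmetic above follows the architected relation
 * CNTP_TVAL = CNTP_CVAL - CNTPCT: a TVAL write of, say, 1000 ticks is
 * stored as the absolute deadline (now + 1000), and a TVAL read
 * recovers (cval - now), which goes negative once the timer fires.
 */
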
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
				    task_pid_nr(current));

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	} else if (id == SYS_ID_AA64MMFR1_EL1) {
		if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
			pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n",
				    task_pid_nr(current));

		val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
	}

	return val;
}

/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, true);
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}

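/*
 * E.g. (illustrative) ID_SANITISED(ID_AA64PFR0_EL1) builds a
 * descriptor whose reads come from the system-wide sanitised feature
 * value rather than the raw hardware register, so every vcpu observes
 * the same, possibly reduced, feature set.
 */
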
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_UNALLOCATED(4,4),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

	{ SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_LORSA_EL1), trap_undef },
	{ SYS_DESC(SYS_LOREA_EL1), trap_undef },
	{ SYS_DESC(SYS_LORN_EL1), trap_undef },
	{ SYS_DESC(SYS_LORC_EL1), trap_undef },
	{ SYS_DESC(SYS_LORID_EL1), trap_undef },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
			     (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

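/*
 * The CRm/Op2 packing mirrors the architected AArch32 encoding
 * (illustrative): PMU_PMEVCNTR(10) yields CRm = 0b1000 | (10 >> 3)
 * == 0b1001 and Op2 = 10 & 7 == 2, i.e. the encoding of PMEVCNTR10.
 */
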
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};

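/*
 * The double encoding mentioned above in action (illustrative): a
 * 32bit MCR to c2/Op2 0 matches the cp15_regs c2_TTBR0 entry, while a
 * 64bit MCRR with Op1 0/CRm 2 matches the cp15_64_regs entry backed
 * by the same c2_TTBR0 state.
 */
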
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val  = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

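/*
 * Key layout used by the bsearch above (illustrative): SCTLR_EL1
 * (Op0 3, Op1 0, CRn 1, CRm 0, Op2 0) packs to
 * (3 << 14) | (1 << 7) == 0xc080, which is why every table must stay
 * sorted ascending by Op0, Op1, CRn, CRm, Op2.
 */
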
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip the trapped instruction if the handler asks us to */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table,
 * and calls the corresponding trap handler.
 *
 * @vcpu: pointer to the vcpu performing the access
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

1791/**
7769db90 1792 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
62a89c44
MZ
1793 * @vcpu: The VCPU pointer
1794 * @run: The kvm_run struct
1795 */
72564016
MZ
1796static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1797 const struct sys_reg_desc *global,
1798 size_t nr_global,
1799 const struct sys_reg_desc *target_specific,
1800 size_t nr_specific)
62a89c44
MZ
1801{
1802 struct sys_reg_params params;
1803 u32 hsr = kvm_vcpu_get_hsr(vcpu);
c667186f
MZ
1804 int Rt = kvm_vcpu_sys_get_rt(vcpu);
1805 int Rt2 = (hsr >> 10) & 0x1f;
62a89c44 1806
2072d29c
MZ
1807 params.is_aarch32 = true;
1808 params.is_32bit = false;
62a89c44 1809 params.CRm = (hsr >> 1) & 0xf;
62a89c44
MZ
1810 params.is_write = ((hsr & 1) == 0);
1811
1812 params.Op0 = 0;
1813 params.Op1 = (hsr >> 16) & 0xf;
1814 params.Op2 = 0;
1815 params.CRn = 0;
1816
1817 /*
2ec5be3d 1818 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
62a89c44
MZ
1819 * backends between AArch32 and AArch64, we get away with it.
1820 */
1821 if (params.is_write) {
2ec5be3d
PF
1822 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1823 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
62a89c44
MZ
1824 }
1825
b6b7a806
MZ
1826 /*
1827 * Try to emulate the coprocessor access using the target
1828 * specific table first, and using the global table afterwards.
1829 * If either of the tables contains a handler, handle the
1830 * potential register operation in the case of a read and return
1831 * with success.
1832 */
1833 if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1834 !emulate_cp(vcpu, &params, global, nr_global)) {
1835 /* Split up the value between registers for the read side */
1836 if (!params.is_write) {
1837 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1838 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1839 }
62a89c44 1840
b6b7a806 1841 return 1;
62a89c44
MZ
1842 }
1843
b6b7a806 1844 unhandled_cp_access(vcpu, &params);
62a89c44
MZ
1845 return 1;
1846}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The global (non target-specific) trap descriptor table
 * @nr_global: The number of entries in @global
 * @target_specific: The target-specific trap descriptor table
 * @nr_specific: The number of entries in @target_specific
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

1954/**
1955 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
1956 * @vcpu: The VCPU pointer
1957 * @run: The kvm_run struct
1958 */
1959int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1960{
1961 struct sys_reg_params params;
1962 unsigned long esr = kvm_vcpu_get_hsr(vcpu);
c667186f 1963 int Rt = kvm_vcpu_sys_get_rt(vcpu);
2ec5be3d 1964 int ret;
7c8c5e6a 1965
eef8c85a
AB
1966 trace_kvm_handle_sys_reg(esr);
1967
2072d29c
MZ
1968 params.is_aarch32 = false;
1969 params.is_32bit = false;
7c8c5e6a
MZ
1970 params.Op0 = (esr >> 20) & 3;
1971 params.Op1 = (esr >> 14) & 0x7;
1972 params.CRn = (esr >> 10) & 0xf;
1973 params.CRm = (esr >> 1) & 0xf;
1974 params.Op2 = (esr >> 17) & 0x7;
2ec5be3d 1975 params.regval = vcpu_get_reg(vcpu, Rt);
7c8c5e6a
MZ
1976 params.is_write = !(esr & 1);
1977
2ec5be3d
PF
1978 ret = emulate_sys_reg(vcpu, &params);
1979
1980 if (!params.is_write)
1981 vcpu_set_reg(vcpu, Rt, params.regval);
1982 return ret;
7c8c5e6a
MZ
1983}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg_by_id(id, &params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

2053/*
2054 * These are the invariant sys_reg registers: we let the guest see the
2055 * host versions of these, so they're part of the guest state.
2056 *
2057 * A future CPU may provide a mechanism to present different values to
2058 * the guest, or a future kvm may trap them.
2059 */
2060
2061#define FUNCTION_INVARIANT(reg) \
2062 static void get_##reg(struct kvm_vcpu *v, \
2063 const struct sys_reg_desc *r) \
2064 { \
1f3d8699 2065 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
7c8c5e6a
MZ
2066 }
2067
2068FUNCTION_INVARIANT(midr_el1)
2069FUNCTION_INVARIANT(ctr_el0)
2070FUNCTION_INVARIANT(revidr_el1)
7c8c5e6a
MZ
2071FUNCTION_INVARIANT(clidr_el1)
2072FUNCTION_INVARIANT(aidr_el1)
2073
2074/* ->val is filled in by kvm_sys_reg_table_init() */
2075static struct sys_reg_desc invariant_sys_regs[] = {
0d449541
MR
2076 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
2077 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
0d449541
MR
2078 { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
2079 { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
2080 { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
7c8c5e6a
MZ
2081};
2082
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* First give userspace all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 * If software reads the Cache Type fields from Ctype1
	 * upwards, once it has seen a value of 0b000, no caches
	 * exist at further-out levels of the hierarchy. So, for
	 * example, if Ctype3 is the first Cache Type field with a
	 * value of 0b000, the values of Ctype4 to Ctype7 must be
	 * ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}