Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
2f4a07c5 MZ |
2 | /* |
3 | * Copyright (C) 2012,2013 - ARM Ltd | |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
5 | * | |
6 | * Derived from arch/arm/kvm/guest.c: | |
7 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
8 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
2f4a07c5 MZ |
9 | */ |
10 | ||
e1c9c983 | 11 | #include <linux/bits.h> |
2f4a07c5 MZ |
12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | |
e1c9c983 | 14 | #include <linux/nospec.h> |
2f4a07c5 MZ |
15 | #include <linux/kvm_host.h> |
16 | #include <linux/module.h> | |
be25bbb3 | 17 | #include <linux/stddef.h> |
dc52f31a | 18 | #include <linux/string.h> |
2f4a07c5 MZ |
19 | #include <linux/vmalloc.h> |
20 | #include <linux/fs.h> | |
85bd0ba1 | 21 | #include <kvm/arm_psci.h> |
2f4a07c5 | 22 | #include <asm/cputype.h> |
7c0f6ba6 | 23 | #include <linux/uaccess.h> |
e1c9c983 | 24 | #include <asm/fpsimd.h> |
2f4a07c5 | 25 | #include <asm/kvm.h> |
2f4a07c5 MZ |
26 | #include <asm/kvm_emulate.h> |
27 | #include <asm/kvm_coproc.h> | |
e1c9c983 DM |
28 | #include <asm/kvm_host.h> |
29 | #include <asm/sigcontext.h> | |
2f4a07c5 | 30 | |
eef8c85a AB |
31 | #include "trace.h" |
32 | ||
b19e6892 AT |
/* Helpers to declare per-VM and per-vcpu debugfs statistics entries. */
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

/* Statistics exposed under debugfs; the NULL entry terminates the table. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT(hvc_exit_stat),
        VCPU_STAT(wfe_exit_stat),
        VCPU_STAT(wfi_exit_stat),
        VCPU_STAT(mmio_exit_user),
        VCPU_STAT(mmio_exit_kernel),
        VCPU_STAT(exits),
        { NULL }
};
45 | ||
/* No arch-specific vcpu setup needed here; always succeeds. */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}
50 | ||
8c86dfe3 DM |
/*
 * True if @off (a 32-bit-word index into struct kvm_regs) lands inside
 * the FPSIMD V-register array (fp_regs.vregs[0..31]).
 */
static bool core_reg_offset_is_vreg(u64 off)
{
        return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
                off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}
56 | ||
2f4a07c5 MZ |
/* Strip the arch/size/coproc fields of @id, leaving the kvm_regs word index. */
static u64 core_reg_offset_from_id(u64 id)
{
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
61 | ||
/*
 * Map a core register offset (32-bit-word index into struct kvm_regs)
 * to that register's size in bytes.
 *
 * Returns the size on success, or -EINVAL if @off does not name a
 * register, is not naturally aligned for the register's size, or names
 * an FPSIMD V-register on an SVE-enabled vcpu.
 */
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
        int size;

        switch (off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
        case KVM_REG_ARM_CORE_REG(regs.sp):
        case KVM_REG_ARM_CORE_REG(regs.pc):
        case KVM_REG_ARM_CORE_REG(regs.pstate):
        case KVM_REG_ARM_CORE_REG(sp_el1):
        case KVM_REG_ARM_CORE_REG(elr_el1):
        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
                size = sizeof(__u64);
                break;

        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                size = sizeof(__uint128_t);
                break;

        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                size = sizeof(__u32);
                break;

        default:
                return -EINVAL;
        }

        /* Offsets count 32-bit words, so require natural alignment: */
        if (!IS_ALIGNED(off, size / sizeof(__u32)))
                return -EINVAL;

        /*
         * The KVM_REG_ARM64_SVE regs must be used instead of
         * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
         * SVE-enabled vcpus:
         */
        if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
                return -EINVAL;

        return size;
}
106 | ||
107 | static int validate_core_offset(const struct kvm_vcpu *vcpu, | |
108 | const struct kvm_one_reg *reg) | |
109 | { | |
110 | u64 off = core_reg_offset_from_id(reg->id); | |
111 | int size = core_reg_size_from_offset(vcpu, off); | |
112 | ||
113 | if (size < 0) | |
114 | return -EINVAL; | |
115 | ||
116 | if (KVM_REG_SIZE(reg->id) != size) | |
117 | return -EINVAL; | |
118 | ||
8c86dfe3 | 119 | return 0; |
d26c25a9 DM |
120 | } |
121 | ||
2f4a07c5 MZ |
/* KVM_GET_ONE_REG handler for core (struct kvm_regs) registers. */
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /*
         * Because the kvm_regs structure is a mix of 32, 64 and
         * 128bit fields, we index it as if it was a 32bit
         * array. Hence below, nr_regs is the number of entries, and
         * off the index in the "array".
         */
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
        int nr_regs = sizeof(*regs) / sizeof(__u32);
        u32 off;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        /*
         * NOTE(review): the '>=' also rejects a register ending exactly at
         * nr_regs; presumably harmless because kvm_regs ends in reserved
         * padding, but confirm if the struct layout ever changes.
         */
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;

        if (validate_core_offset(vcpu, reg))
                return -EINVAL;

        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}
149 | ||
/*
 * KVM_SET_ONE_REG handler for core (struct kvm_regs) registers.
 *
 * The user value is staged into a local buffer so failed validation
 * leaves vcpu state untouched.  Writes to pstate are vetted so that
 * userspace cannot select an execution mode inconsistent with the
 * vcpu's 32/64-bit configuration.
 */
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
        int nr_regs = sizeof(*regs) / sizeof(__u32);
        __uint128_t tmp;        /* big enough for the largest (V-)register */
        void *valp = &tmp;
        u64 off;
        int err = 0;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;

        if (validate_core_offset(vcpu, reg))
                return -EINVAL;

        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
                err = -EFAULT;
                goto out;
        }

        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
                u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
                        /* AArch32 EL0 needs host 32-bit EL0 support */
                        if (!system_supports_32bit_el0())
                                return -EINVAL;
                        break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
                        /* AArch32 privileged modes need a 32-bit EL1 vcpu */
                        if (!vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
                        /* AArch64 modes are invalid for a 32-bit EL1 vcpu */
                        if (vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
        return err;
}
208 | ||
9033bba4 DM |
/*
 * Helpers for the KVM_REG_ARM64_SVE_VLS bitmap: one bit per vector
 * quadword count (VQ), packed into an array of 64-bit words.
 */
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
9033bba4 DM |
212 | |
/*
 * KVM_GET_ONE_REG handler for the KVM_REG_ARM64_SVE_VLS pseudo-register:
 * report the supported SVE vector lengths as a bitmap of VQs.
 */
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned int max_vq, vq;
        u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

        if (!vcpu_has_sve(vcpu))
                return -ENOENT;

        if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
                return -EINVAL;

        memset(vqs, 0, sizeof(vqs));

        /* Set one bit per host-available VQ up to this vcpu's maximum: */
        max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (sve_vq_available(vq))
                        vqs[vq_word(vq)] |= vq_mask(vq);

        if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
                return -EFAULT;

        return 0;
}
236 | ||
/*
 * KVM_SET_ONE_REG handler for KVM_REG_ARM64_SVE_VLS: configure the
 * vcpu's set of SVE vector lengths.  Only allowed before the vcpu's
 * SVE configuration is finalized (and sve_state allocated).
 */
static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned int max_vq, vq;
        u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

        if (!vcpu_has_sve(vcpu))
                return -ENOENT;

        if (kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM; /* too late! */

        if (WARN_ON(vcpu->arch.sve_state))
                return -EINVAL;

        if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
                return -EFAULT;

        /* Find the largest VQ userspace requested: */
        max_vq = 0;
        for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
                if (vq_present(vqs, vq))
                        max_vq = vq;

        if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
                return -EINVAL;

        /*
         * Vector lengths supported by the host can't currently be
         * hidden from the guest individually: instead we can only set a
         * maximum via ZCR_EL2.LEN. So, make sure the available vector
         * lengths match the set requested exactly up to the requested
         * maximum:
         */
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (vq_present(vqs, vq) != sve_vq_available(vq))
                        return -EINVAL;

        /* Can't run with no vector lengths at all: */
        if (max_vq < SVE_VQ_MIN)
                return -EINVAL;

        /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
        vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

        return 0;
}
282 | ||
e1c9c983 DM |
/*
 * Layout of SVE register IDs: a 5-bit slice index in the low bits,
 * followed by a 5-bit register number.
 */
#define SVE_REG_SLICE_SHIFT 0
#define SVE_REG_SLICE_BITS 5
#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS 5

#define SVE_REG_SLICE_MASK \
        GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \
                SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK \
        GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

/* User-visible size of one Z- / P-register slice, taken from the reg ID: */
#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
        unsigned int koffset;   /* offset into sve_state in kernel memory */
        unsigned int klen;      /* length in kernel memory */
        unsigned int upad;      /* extra trailing padding in user memory */
};
313 | ||
52110aa9 DM |
/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
                             struct kvm_vcpu *vcpu,
                             const struct kvm_one_reg *reg)
{
        /* reg ID ranges for Z- registers */
        const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
        const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
                                                       SVE_NUM_SLICES - 1);

        /* reg ID ranges for P- registers and FFR (which are contiguous) */
        const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
        const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

        unsigned int vq;
        unsigned int reg_num;

        unsigned int reqoffset, reqlen; /* User-requested offset and length */
        unsigned int maxlen; /* Maximum permitted length */

        size_t sve_state_size;

        const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
                                                        SVE_NUM_SLICES - 1);

        /* Verify that the P-regs and FFR really do have contiguous IDs: */
        BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

        /* Verify that we match the UAPI header: */
        BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

        reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

        if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
                /* Only slice 0 exists today; reject any other slice: */
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;

                vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

                reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
                reqlen = KVM_SVE_ZREG_SIZE;
                maxlen = SVE_SIG_ZREG_SIZE(vq);
        } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;

                vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

                reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
                reqlen = KVM_SVE_PREG_SIZE;
                maxlen = SVE_SIG_PREG_SIZE(vq);
        } else {
                return -EINVAL;
        }

        sve_state_size = vcpu_sve_state_size(vcpu);
        if (WARN_ON(!sve_state_size))
                return -EINVAL;

        /* Clamp the kernel-side offset against speculative out-of-bounds: */
        region->koffset = array_index_nospec(reqoffset, sve_state_size);
        region->klen = min(maxlen, reqlen);
        region->upad = reqlen - region->klen;

        return 0;
}
384 | ||
/* KVM_GET_ONE_REG handler for SVE registers and the VLS pseudo-register. */
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret;
        struct sve_state_reg_region region;
        char __user *uptr = (char __user *)reg->addr;

        /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
        if (reg->id == KVM_REG_ARM64_SVE_VLS)
                return get_sve_vls(vcpu, reg);

        /* Try to interpret reg ID as an architectural SVE register... */
        ret = sve_reg_to_region(&region, vcpu, reg);
        if (ret)
                return ret;

        /* sve_state only exists once the SVE config is finalized: */
        if (!kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;

        /* Copy the live bytes, then zero-fill any user-visible padding: */
        if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
                         region.klen) ||
            clear_user(uptr + region.klen, region.upad))
                return -EFAULT;

        return 0;
}
410 | ||
/* KVM_SET_ONE_REG handler for SVE registers and the VLS pseudo-register. */
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret;
        struct sve_state_reg_region region;
        const char __user *uptr = (const char __user *)reg->addr;

        /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
        if (reg->id == KVM_REG_ARM64_SVE_VLS)
                return set_sve_vls(vcpu, reg);

        /* Try to interpret reg ID as an architectural SVE register... */
        ret = sve_reg_to_region(&region, vcpu, reg);
        if (ret)
                return ret;

        /* sve_state only exists once the SVE config is finalized: */
        if (!kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;

        if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
                           region.klen))
                return -EFAULT;

        return 0;
}
435 | ||
2f4a07c5 MZ |
/* Not supported on arm64: register access goes via KVM_GET_ONE_REG. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}
440 | ||
/* Not supported on arm64: register access goes via KVM_SET_ONE_REG. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}
445 | ||
8c86dfe3 DM |
/*
 * Walk every 32-bit-word offset of struct kvm_regs and emit a register
 * ID for each offset that names a real, accessible core register.
 *
 * If @uindices is NULL nothing is copied out and the call just counts.
 * Returns the number of registers enumerated, or -EFAULT if put_user()
 * fails.
 */
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
                                 u64 __user *uindices)
{
        unsigned int i;
        int n = 0;

        for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
                u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
                int size = core_reg_size_from_offset(vcpu, i);

                /* Skip offsets that are not register boundaries or hidden: */
                if (size < 0)
                        continue;

                switch (size) {
                case sizeof(__u32):
                        reg |= KVM_REG_SIZE_U32;
                        break;

                case sizeof(__u64):
                        reg |= KVM_REG_SIZE_U64;
                        break;

                case sizeof(__uint128_t):
                        reg |= KVM_REG_SIZE_U128;
                        break;

                default:
                        WARN_ON(1);
                        continue;
                }

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}
488 | ||
/* Count the core registers visible to this vcpu (NULL = count only). */
static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
        return copy_core_reg_indices(vcpu, NULL);
}
493 | ||
1df08ba0 AB |
494 | /** |
495 | * ARM64 versions of the TIMER registers, always available on arm64 | |
496 | */ | |
497 | ||
498 | #define NUM_TIMER_REGS 3 | |
499 | ||
500 | static bool is_timer_reg(u64 index) | |
501 | { | |
502 | switch (index) { | |
503 | case KVM_REG_ARM_TIMER_CTL: | |
504 | case KVM_REG_ARM_TIMER_CNT: | |
505 | case KVM_REG_ARM_TIMER_CVAL: | |
506 | return true; | |
507 | } | |
508 | return false; | |
509 | } | |
510 | ||
511 | static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | |
512 | { | |
513 | if (put_user(KVM_REG_ARM_TIMER_CTL, uindices)) | |
514 | return -EFAULT; | |
515 | uindices++; | |
516 | if (put_user(KVM_REG_ARM_TIMER_CNT, uindices)) | |
517 | return -EFAULT; | |
518 | uindices++; | |
519 | if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices)) | |
520 | return -EFAULT; | |
521 | ||
522 | return 0; | |
523 | } | |
524 | ||
/* KVM_SET_ONE_REG handler for the arch-timer pseudo-registers. */
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int ret;

        ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
        if (ret != 0)
                return -EFAULT;

        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
537 | ||
/* KVM_GET_ONE_REG handler for the arch-timer pseudo-registers. */
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        val = kvm_arm_timer_get_reg(vcpu, reg->id);
        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
546 | ||
8e3c54c8 DM |
/*
 * Number of SVE register IDs this vcpu exposes: all Z-, P- and FFR
 * slices plus the KVM_REG_ARM64_SVE_VLS pseudo-register; 0 without SVE.
 */
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);

        if (!vcpu_has_sve(vcpu))
                return 0;

        /* Policed by KVM_GET_REG_LIST: */
        WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

        return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
                + 1; /* KVM_REG_ARM64_SVE_VLS */
}
560 | ||
/*
 * Emit all SVE register IDs for this vcpu into @uindices.
 * Returns the number of IDs written (0 without SVE), or -EFAULT.
 */
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);
        u64 reg;
        unsigned int i, n;
        int num_regs = 0;

        if (!vcpu_has_sve(vcpu))
                return 0;

        /* Policed by KVM_GET_REG_LIST: */
        WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

        /*
         * Enumerate this first, so that userspace can save/restore in
         * the order reported by KVM_GET_REG_LIST:
         */
        reg = KVM_REG_ARM64_SVE_VLS;
        if (put_user(reg, uindices++))
                return -EFAULT;
        ++num_regs;

        for (i = 0; i < slices; i++) {
                for (n = 0; n < SVE_NUM_ZREGS; n++) {
                        reg = KVM_REG_ARM64_SVE_ZREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
                        num_regs++;
                }

                for (n = 0; n < SVE_NUM_PREGS; n++) {
                        reg = KVM_REG_ARM64_SVE_PREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
                        num_regs++;
                }

                reg = KVM_REG_ARM64_SVE_FFR(i);
                if (put_user(reg, uindices++))
                        return -EFAULT;
                num_regs++;
        }

        return num_regs;
}
607 | ||
2f4a07c5 MZ |
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
        unsigned long res = 0;

        /* Sum every register class exposed through the ONE_REG API: */
        res += num_core_regs(vcpu);
        res += num_sve_regs(vcpu);
        res += kvm_arm_num_sys_reg_descs(vcpu);
        res += kvm_arm_get_fw_num_regs(vcpu);
        res += NUM_TIMER_REGS;

        return res;
}
625 | ||
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        int ret;

        /* These two helpers return the number of IDs they wrote: */
        ret = copy_core_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_sve_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        /* The fw helper returns 0 on success, so advance by the count: */
        ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += kvm_arm_get_fw_num_regs(vcpu);

        /* Likewise, copy_timer_indices() returns 0 on success: */
        ret = copy_timer_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += NUM_TIMER_REGS;

        return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
657 | ||
/* Top-level KVM_GET_ONE_REG dispatcher: route by coproc field. */
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:   return kvm_arm_get_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);

        /* Anything else is treated as a system register: */
        return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
675 | ||
/* Top-level KVM_SET_ONE_REG dispatcher: route by coproc field. */
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:   return kvm_arm_set_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);

        /* Anything else is treated as a system register: */
        return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
693 | ||
/* Special registers are not exposed this way on arm64. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}
699 | ||
/* Special registers are not exposed this way on arm64. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}
705 | ||
539aee0e JM |
/*
 * KVM_GET_VCPU_EVENTS: report a pending virtual SError (HCR_EL2.VSE),
 * including its ESR when the host has the RAS extension.
 */
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
        events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

        if (events->exception.serror_pending && events->exception.serror_has_esr)
                events->exception.serror_esr = vcpu_get_vsesr(vcpu);

        return 0;
}
717 | ||
539aee0e JM |
/*
 * KVM_SET_VCPU_EVENTS: inject a pending SError.  A caller-supplied ESR
 * is only accepted when the host has the RAS extension and the value
 * fits in the ISS field; otherwise a plain virtual abort is injected.
 */
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;

        if (serror_pending && has_esr) {
                if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                        return -EINVAL;

                /* Only the ISS bits of the ESR may be specified: */
                if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
                        kvm_set_sei_esr(vcpu, events->exception.serror_esr);
                else
                        return -EINVAL;
        } else if (serror_pending) {
                kvm_inject_vabt(vcpu);
        }

        return 0;
}
738 | ||
2f4a07c5 MZ |
/*
 * Map the host CPU's implementer/part number to a KVM target type,
 * falling back to the generic v8 target for unrecognised CPUs.
 */
int __attribute_const__ kvm_target_cpu(void)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_number = read_cpuid_part_number();

        switch (implementor) {
        case ARM_CPU_IMP_ARM:
                switch (part_number) {
                case ARM_CPU_PART_AEM_V8:
                        return KVM_ARM_TARGET_AEM_V8;
                case ARM_CPU_PART_FOUNDATION:
                        return KVM_ARM_TARGET_FOUNDATION_V8;
                case ARM_CPU_PART_CORTEX_A53:
                        return KVM_ARM_TARGET_CORTEX_A53;
                case ARM_CPU_PART_CORTEX_A57:
                        return KVM_ARM_TARGET_CORTEX_A57;
                }
                break;
        case ARM_CPU_IMP_APM:
                switch (part_number) {
                case APM_CPU_PART_POTENZA:
                        return KVM_ARM_TARGET_XGENE_POTENZA;
                }
                break;
        }

        /* Return a default generic target */
        return KVM_ARM_TARGET_GENERIC_V8;
}
768 | ||
473bdc0e AP |
769 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) |
770 | { | |
771 | int target = kvm_target_cpu(); | |
772 | ||
773 | if (target < 0) | |
774 | return -ENODEV; | |
775 | ||
776 | memset(init, 0, sizeof(*init)); | |
777 | ||
778 | /* | |
779 | * For now, we don't return any features. | |
780 | * In future, we might use features to return target | |
781 | * specific features available for the preferred | |
782 | * target type. | |
783 | */ | |
784 | init->target = (__u32)target; | |
785 | ||
786 | return 0; | |
787 | } | |
788 | ||
2f4a07c5 MZ |
/* FP state is not exposed via KVM_GET_FPU on arm64. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}
793 | ||
/* FP state is not exposed via KVM_SET_FPU on arm64. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}
798 | ||
/* Address translation via this ioctl is not supported on arm64. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}
0e6f07f2 | 804 | |
337b99bf AB |
/* Debug flags userspace may legitimately set in kvm_guest_debug.control */
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
                            KVM_GUESTDBG_USE_SW_BP | \
                            KVM_GUESTDBG_USE_HW | \
                            KVM_GUESTDBG_SINGLESTEP)
0e6f07f2 AB |
809 | |
/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vcpu pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int ret = 0;

        trace_kvm_set_guest_debug(vcpu, dbg->control);

        /* Reject any flags outside the supported set: */
        if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
                ret = -EINVAL;
                goto out;
        }

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;

                /* Hardware assisted Break and Watch points */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        vcpu->arch.external_debug_state = dbg->arch;
                }

        } else {
                /* If not enabled clear all flags */
                vcpu->guest_debug = 0;
        }

out:
        return ret;
}
bb0c70bc SZ |
848 | |
849 | int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, | |
850 | struct kvm_device_attr *attr) | |
851 | { | |
852 | int ret; | |
853 | ||
854 | switch (attr->group) { | |
855 | case KVM_ARM_VCPU_PMU_V3_CTRL: | |
856 | ret = kvm_arm_pmu_v3_set_attr(vcpu, attr); | |
857 | break; | |
99a1db7a CD |
858 | case KVM_ARM_VCPU_TIMER_CTRL: |
859 | ret = kvm_arm_timer_set_attr(vcpu, attr); | |
860 | break; | |
bb0c70bc SZ |
861 | default: |
862 | ret = -ENXIO; | |
863 | break; | |
864 | } | |
865 | ||
866 | return ret; | |
867 | } | |
868 | ||
869 | int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, | |
870 | struct kvm_device_attr *attr) | |
871 | { | |
872 | int ret; | |
873 | ||
874 | switch (attr->group) { | |
875 | case KVM_ARM_VCPU_PMU_V3_CTRL: | |
876 | ret = kvm_arm_pmu_v3_get_attr(vcpu, attr); | |
877 | break; | |
99a1db7a CD |
878 | case KVM_ARM_VCPU_TIMER_CTRL: |
879 | ret = kvm_arm_timer_get_attr(vcpu, attr); | |
880 | break; | |
bb0c70bc SZ |
881 | default: |
882 | ret = -ENXIO; | |
883 | break; | |
884 | } | |
885 | ||
886 | return ret; | |
887 | } | |
888 | ||
889 | int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, | |
890 | struct kvm_device_attr *attr) | |
891 | { | |
892 | int ret; | |
893 | ||
894 | switch (attr->group) { | |
895 | case KVM_ARM_VCPU_PMU_V3_CTRL: | |
896 | ret = kvm_arm_pmu_v3_has_attr(vcpu, attr); | |
897 | break; | |
99a1db7a CD |
898 | case KVM_ARM_VCPU_TIMER_CTRL: |
899 | ret = kvm_arm_timer_has_attr(vcpu, attr); | |
900 | break; | |
bb0c70bc SZ |
901 | default: |
902 | ret = -ENXIO; | |
903 | break; | |
904 | } | |
905 | ||
906 | return ret; | |
907 | } |