Commit | Line | Data |
---|---|---|
caab277b | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
4f8d6632 MZ |
2 | /* |
3 | * Copyright (C) 2012,2013 - ARM Ltd | |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
5 | * | |
6 | * Derived from arch/arm/include/asm/kvm_host.h: | |
7 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
8 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4f8d6632 MZ |
9 | */ |
10 | ||
11 | #ifndef __ARM64_KVM_HOST_H__ | |
12 | #define __ARM64_KVM_HOST_H__ | |
13 | ||
05469831 | 14 | #include <linux/arm-smccc.h> |
3f61f409 | 15 | #include <linux/bitmap.h> |
65647300 | 16 | #include <linux/types.h> |
3f61f409 | 17 | #include <linux/jump_label.h> |
65647300 | 18 | #include <linux/kvm_types.h> |
fb88707d | 19 | #include <linux/maple_tree.h> |
3f61f409 | 20 | #include <linux/percpu.h> |
ff367fe4 | 21 | #include <linux/psci.h> |
85738e05 | 22 | #include <asm/arch_gicv3.h> |
3f61f409 | 23 | #include <asm/barrier.h> |
63a1e1c9 | 24 | #include <asm/cpufeature.h> |
1e0cf16c | 25 | #include <asm/cputype.h> |
4f5abad9 | 26 | #include <asm/daifflags.h> |
17eed27b | 27 | #include <asm/fpsimd.h> |
4f8d6632 | 28 | #include <asm/kvm.h> |
3a3604bc | 29 | #include <asm/kvm_asm.h> |
d8bd48e3 | 30 | #include <asm/vncr_mapping.h> |
4f8d6632 | 31 | |
c1426e4c EA |
32 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED |
33 | ||
920552b2 | 34 | #define KVM_HALT_POLL_NS_DEFAULT 500000 |
4f8d6632 MZ |
35 | |
36 | #include <kvm/arm_vgic.h> | |
37 | #include <kvm/arm_arch_timer.h> | |
04fe4726 | 38 | #include <kvm/arm_pmu.h> |
4f8d6632 | 39 | |
ef748917 ML |
40 | #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS |
41 | ||
a7484c80 | 42 | #define KVM_VCPU_MAX_FEATURES 9 |
a7a2c72a | 43 | #define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1) |
4f8d6632 | 44 | |
7b244e2b | 45 | #define KVM_REQ_SLEEP \ |
2387149e | 46 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
4b1b97f0 MZ |
47 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
48 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | |
49 | #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) | |
50 | #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) | |
51 | #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) | |
52 | #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) | |
53 | #define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7) | |
54 | #define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8) | |
55 | #define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9) | |
069a05e5 | 56 | #define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10) |
b13216cf | 57 | |
c862626e KZ |
58 | #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ |
59 | KVM_DIRTY_LOG_INITIALLY_SET) | |
60 | ||
fcc5bf89 JZ |
61 | #define KVM_HAVE_MMU_RWLOCK |
62 | ||
d8b369c4 DB |
63 | /* |
64 | * Mode of operation configurable with kvm-arm.mode early param. | |
65 | * See Documentation/admin-guide/kernel-parameters.txt for more information. | |
66 | */ | |
67 | enum kvm_mode { | |
68 | KVM_MODE_DEFAULT, | |
69 | KVM_MODE_PROTECTED, | |
675cabc8 | 70 | KVM_MODE_NV, |
b6a68b97 | 71 | KVM_MODE_NONE, |
d8b369c4 | 72 | }; |
675cabc8 | 73 | #ifdef CONFIG_KVM |
3eb681fb | 74 | enum kvm_mode kvm_get_mode(void); |
675cabc8 JL |
75 | #else |
76 | static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; } | |
77 | #endif | |
d8b369c4 | 78 | |
8d20bd63 | 79 | extern unsigned int __ro_after_init kvm_sve_max_vl; |
66d5b53e | 80 | extern unsigned int __ro_after_init kvm_host_sve_max_vl; |
8d20bd63 | 81 | int __init kvm_arm_init_sve(void); |
0f062bfe | 82 | |
6b7982fe | 83 | u32 __attribute_const__ kvm_target_cpu(void); |
3d4b2a4c | 84 | void kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
19bcc89e | 85 | void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); |
4f8d6632 | 86 | |
717a7eeb QP |
87 | struct kvm_hyp_memcache { |
88 | phys_addr_t head; | |
89 | unsigned long nr_pages; | |
e912efed | 90 | struct pkvm_mapping *mapping; /* only used from EL1 */ |
79ea6623 VD |
91 | |
92 | #define HYP_MEMCACHE_ACCOUNT_STAGE2 BIT(1) | |
cf2d228d | 93 | unsigned long flags; |
717a7eeb QP |
94 | }; |
95 | ||
96 | static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc, | |
97 | phys_addr_t *p, | |
98 | phys_addr_t (*to_pa)(void *virt)) | |
99 | { | |
100 | *p = mc->head; | |
101 | mc->head = to_pa(p); | |
102 | mc->nr_pages++; | |
103 | } | |
104 | ||
105 | static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc, | |
106 | void *(*to_va)(phys_addr_t phys)) | |
107 | { | |
b938731e | 108 | phys_addr_t *p = to_va(mc->head & PAGE_MASK); |
717a7eeb QP |
109 | |
110 | if (!mc->nr_pages) | |
111 | return NULL; | |
112 | ||
113 | mc->head = *p; | |
114 | mc->nr_pages--; | |
115 | ||
116 | return p; | |
117 | } | |
118 | ||
119 | static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc, | |
120 | unsigned long min_pages, | |
121 | void *(*alloc_fn)(void *arg), | |
122 | phys_addr_t (*to_pa)(void *virt), | |
123 | void *arg) | |
124 | { | |
125 | while (mc->nr_pages < min_pages) { | |
126 | phys_addr_t *p = alloc_fn(arg); | |
127 | ||
128 | if (!p) | |
129 | return -ENOMEM; | |
130 | push_hyp_memcache(mc, p, to_pa); | |
131 | } | |
132 | ||
133 | return 0; | |
134 | } | |
135 | ||
136 | static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc, | |
137 | void (*free_fn)(void *virt, void *arg), | |
138 | void *(*to_va)(phys_addr_t phys), | |
139 | void *arg) | |
140 | { | |
141 | while (mc->nr_pages) | |
142 | free_fn(pop_hyp_memcache(mc, to_va), arg); | |
143 | } | |
144 | ||
145 | void free_hyp_memcache(struct kvm_hyp_memcache *mc); | |
146 | int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages); | |
147 | ||
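The push/pop helpers above thread free pages into a singly-linked list keyed by physical address: the first 8 bytes of each cached page hold the physical address of the previous head. The fragment below is a minimal user-space sketch of that scheme, assuming identity virt/phys conversions; page_to_pa()/pa_to_page() and the 4-page top-up threshold are illustrative stand-ins, not kernel APIs.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct memcache {
	uintptr_t head;		/* "physical" address of the first free page */
	unsigned long nr_pages;
};

static uintptr_t page_to_pa(void *virt) { return (uintptr_t)virt; }
static void *pa_to_page(uintptr_t pa)   { return (void *)pa; }

static void push_page(struct memcache *mc, uintptr_t *page)
{
	*page = mc->head;		/* link the old head into the new page */
	mc->head = page_to_pa(page);
	mc->nr_pages++;
}

static uintptr_t *pop_page(struct memcache *mc)
{
	uintptr_t *page;

	if (!mc->nr_pages)
		return NULL;

	page = pa_to_page(mc->head);
	mc->head = *page;		/* unlink */
	mc->nr_pages--;
	return page;
}

int main(void)
{
	struct memcache mc = { 0 };
	uintptr_t *p;

	while (mc.nr_pages < 4) {	/* "top up" to a minimum of four pages */
		uintptr_t *page = aligned_alloc(4096, 4096);

		if (!page)
			return 1;
		push_page(&mc, page);
	}

	while ((p = pop_page(&mc))) {	/* "free" drains the whole cache */
		printf("popped page %p, %lu left\n", (void *)p, mc.nr_pages);
		free(p);
	}
	return 0;
}
```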
e329fb75 | 148 | struct kvm_vmid { |
3248136b | 149 | atomic64_t id; |
e329fb75 CD |
150 | }; |
151 | ||
a0e50aa3 | 152 | struct kvm_s2_mmu { |
e329fb75 | 153 | struct kvm_vmid vmid; |
4f8d6632 | 154 | |
a0e50aa3 CD |
155 | /* |
156 | * stage2 entry level table | |
157 | * | |
158 | * Two kvm_s2_mmu structures in the same VM can point to the same | |
159 | * pgd here. This happens when running a guest using a | |
160 | * translation regime that isn't affected by its own stage-2 | |
161 | * translation, such as a non-VHE hypervisor running at vEL2, or | |
162 | * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the | |
163 | * canonical stage-2 page tables. | |
164 | */ | |
a0e50aa3 | 165 | phys_addr_t pgd_phys; |
71233d05 | 166 | struct kvm_pgtable *pgt; |
4f8d6632 | 167 | |
fe49fd94 MZ |
168 | /* |
169 | * VTCR value used on the host. For a non-NV guest (or a NV | |
170 | * guest that runs in a context where its own S2 doesn't | |
171 | * apply), its T0SZ value reflects that of the IPA size. | |
172 | * | |
173 | * For a shadow S2 MMU, T0SZ reflects the PARange exposed to | |
174 | * the guest. | |
175 | */ | |
176 | u64 vtcr; | |
177 | ||
94d0e598 MZ |
178 | /* The last vcpu id that ran on each physical CPU */ |
179 | int __percpu *last_vcpu_ran; | |
180 | ||
2f440b72 RK |
181 | #define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0 |
182 | /* | |
183 | * Memory cache used to split | |
184 | * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It | |
185 | * is used to allocate stage2 page tables while splitting huge | |
186 | * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE | |
187 | * influences both the capacity of the split page cache, and | |
188 | * how often KVM reschedules. Be wary of raising CHUNK_SIZE | |
189 | * too high. | |
190 | * | |
191 | * Protected by kvm->slots_lock. | |
192 | */ | |
193 | struct kvm_mmu_memory_cache split_page_cache; | |
194 | uint64_t split_page_chunk_size; | |
195 | ||
cfb1a98d | 196 | struct kvm_arch *arch; |
4f128f8e MZ |
197 | |
198 | /* | |
199 | * For a shadow stage-2 MMU, the virtual vttbr used by the | |
200 | * host to parse the guest S2. | |
201 | * This either contains: | |
202 | * - the virtual VTTBR programmed by the guest hypervisor with | |
203 | * CnP cleared | |
204 | * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid | |
205 | * | |
206 | * We also cache the full VTCR which gets used for TLB invalidation, | |
207 | * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted | |
208 | * to be cached in a TLB" to the letter. | |
209 | */ | |
210 | u64 tlb_vttbr; | |
211 | u64 tlb_vtcr; | |
212 | ||
213 | /* | |
214 | * true when this represents a nested context where virtual | |
215 | * HCR_EL2.VM == 1 | |
216 | */ | |
217 | bool nested_stage2_enabled; | |
218 | ||
c268f204 OU |
219 | /* |
220 | * true when this MMU needs to be unmapped before being used for a new | |
221 | * purpose. | |
222 | */ | |
223 | bool pending_unmap; | |
224 | ||
4f128f8e MZ |
225 | /* |
226 | * 0: Nobody is currently using this, check vttbr for validity | |
227 | * >0: Somebody is actively using this. | |
228 | */ | |
229 | atomic_t refcnt; | |
a0e50aa3 CD |
230 | }; |
231 | ||
8d14797b WD |
232 | struct kvm_arch_memory_slot { |
233 | }; | |
234 | ||
05714cab RRA |
235 | /** |
236 | * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests | |
237 | * | |
238 | * @std_bmap: Bitmap of standard secure service calls | |
428fd678 | 239 | * @std_hyp_bmap: Bitmap of standard hypervisor service calls |
b22216e1 | 240 | * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls |
05714cab RRA |
241 | */ |
242 | struct kvm_smccc_features { | |
243 | unsigned long std_bmap; | |
428fd678 | 244 | unsigned long std_hyp_bmap; |
c0000e58 SK |
245 | unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */ |
246 | unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */ | |
05714cab RRA |
247 | }; |
248 | ||
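Each bitmap above gates one SMCCC service range, and the two vendor bitmaps split the vendor function-number space at 64. The sketch below shows that lookup conceptually; struct smccc_feat and vendor_fn_enabled() are hypothetical stand-ins, not the kernel's implementation.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct smccc_feat { uint64_t vendor_hyp_bmap, vendor_hyp_bmap_2; };

/* function numbers 0-63 live in the first bitmap, 64-127 in the second */
static bool vendor_fn_enabled(const struct smccc_feat *f, unsigned int fn)
{
	uint64_t bmap = (fn < 64) ? f->vendor_hyp_bmap : f->vendor_hyp_bmap_2;

	return bmap & (1ULL << (fn % 64));
}

int main(void)
{
	struct smccc_feat f = { .vendor_hyp_bmap = 1ULL << 3,
				.vendor_hyp_bmap_2 = 1ULL << (65 - 64) };

	printf("fn 3: %d, fn 65: %d, fn 70: %d\n",
	       vendor_fn_enabled(&f, 3), vendor_fn_enabled(&f, 65),
	       vendor_fn_enabled(&f, 70));
	return 0;
}
```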
a1ec5c70 FT |
249 | typedef unsigned int pkvm_handle_t; |
250 | ||
9d0c063a FT |
251 | struct kvm_protected_vm { |
252 | pkvm_handle_t handle; | |
f41dff4e | 253 | struct kvm_hyp_memcache teardown_mc; |
8c0d7d14 | 254 | struct kvm_hyp_memcache stage2_teardown_mc; |
b6ed4fa9 | 255 | bool enabled; |
9d0c063a FT |
256 | }; |
257 | ||
5544750e MZ |
258 | struct kvm_mpidr_data { |
259 | u64 mpidr_mask; | |
260 | DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx); | |
261 | }; | |
262 | ||
263 | static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr) | |
264 | { | |
838d992b MZ |
265 | unsigned long index = 0, mask = data->mpidr_mask; |
266 | unsigned long aff = mpidr & MPIDR_HWID_BITMASK; | |
5544750e | 267 | |
838d992b | 268 | bitmap_gather(&index, &aff, &mask, fls(mask)); |
5544750e MZ |
269 | |
270 | return index; | |
271 | } | |
272 | ||
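kvm_mpidr_index() compresses the MPIDR affinity bits selected by mpidr_mask into a dense index via bitmap_gather(). The stand-alone sketch below re-implements just that gather step to show how a sparse affinity layout maps onto a small table; gather_bits() and the example mask are made up for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* collect the bits of val selected by mask into contiguous low bits */
static unsigned int gather_bits(uint64_t val, uint64_t mask)
{
	unsigned int out = 0, bit = 0;

	for (int i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (val & (1ULL << i))
				out |= 1U << bit;
			bit++;
		}
	}
	return out;
}

int main(void)
{
	/* hypothetical layout: Aff1 uses bits [9:8], Aff0 uses bits [1:0] */
	uint64_t mask = 0x303;

	for (uint64_t mpidr = 0; mpidr <= 0x300; mpidr += 0x100)
		printf("mpidr %#06llx -> index %u\n",
		       (unsigned long long)mpidr, gather_bits(mpidr, mask));
	return 0;
}
```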
888f0880 MZ |
273 | struct kvm_sysreg_masks; |
274 | ||
2fd8f31c MZ |
275 | enum fgt_group_id { |
276 | __NO_FGT_GROUP__, | |
0f013a52 MZ |
277 | HFGRTR_GROUP, |
278 | HFGWTR_GROUP = HFGRTR_GROUP, | |
2fd8f31c MZ |
279 | HDFGRTR_GROUP, |
280 | HDFGWTR_GROUP = HDFGRTR_GROUP, | |
281 | HFGITR_GROUP, | |
282 | HAFGRTR_GROUP, | |
4bc0fe08 MZ |
283 | HFGRTR2_GROUP, |
284 | HFGWTR2_GROUP = HFGRTR2_GROUP, | |
285 | HDFGRTR2_GROUP, | |
286 | HDFGWTR2_GROUP = HDFGRTR2_GROUP, | |
287 | HFGITR2_GROUP, | |
2fd8f31c MZ |
288 | |
289 | /* Must be last */ | |
290 | __NR_FGT_GROUP_IDS__ | |
291 | }; | |
292 | ||
a0e50aa3 CD |
293 | struct kvm_arch { |
294 | struct kvm_s2_mmu mmu; | |
295 | ||
2fd8f31c MZ |
296 | /* |
297 | * Fine-Grained UNDEF, mimicking the FGT layout defined by the | |
298 | * architecture. We track them globally, as we present the | |
299 | * same feature-set to all vcpus. | |
300 | * | |
301 | * Index 0 is currently spare. | |
302 | */ | |
303 | u64 fgu[__NR_FGT_GROUP_IDS__]; | |
304 | ||
4f128f8e MZ |
305 | /* |
306 | * Stage 2 paging state for VMs with nested S2 using a virtual | |
307 | * VMID. | |
308 | */ | |
309 | struct kvm_s2_mmu *nested_mmus; | |
310 | size_t nested_mmus_size; | |
311 | int nested_mmus_next; | |
312 | ||
4f8d6632 MZ |
313 | /* Interrupt controller */ |
314 | struct vgic_dist vgic; | |
85bd0ba1 | 315 | |
47053904 MZ |
316 | /* Timers */ |
317 | struct arch_timer_vm_data timer_data; | |
318 | ||
85bd0ba1 MZ |
319 | /* Mandated version of PSCI */ |
320 | u32 psci_version; | |
c726200d | 321 | |
c43120af OU |
322 | /* Protects VM-scoped configuration data */ |
323 | struct mutex config_lock; | |
324 | ||
c726200d CD |
325 | /* |
326 | * If we encounter a data abort without valid instruction syndrome | |
327 | * information, report this to user space. User space can (and | |
328 | * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is | |
329 | * supported. | |
330 | */ | |
06394531 MZ |
331 | #define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0 |
332 | /* Memory Tagging Extension enabled for the guest */ | |
333 | #define KVM_ARCH_FLAG_MTE_ENABLED 1 | |
334 | /* At least one vCPU has run in the VM */ | |
335 | #define KVM_ARCH_FLAG_HAS_RAN_ONCE 2 | |
2251e9ff OU |
336 | /* The vCPU feature set for the VM is configured */ |
337 | #define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3 | |
bfbab445 | 338 | /* PSCI SYSTEM_SUSPEND enabled for the guest */ |
2251e9ff | 339 | #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4 |
30ec7997 | 340 | /* VM counter offset */ |
2251e9ff | 341 | #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5 |
8a5eb2d2 | 342 | /* Timer PPIs made immutable */ |
2251e9ff | 343 | #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6 |
47334146 | 344 | /* Initial ID reg values loaded */ |
4202bcac | 345 | #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7 |
c5bac1ef MZ |
346 | /* Fine-Grained UNDEF initialised */ |
347 | #define KVM_ARCH_FLAG_FGU_INITIALIZED 8 | |
41d6028e FT |
348 | /* SVE exposed to guest */ |
349 | #define KVM_ARCH_FLAG_GUEST_HAS_SVE 9 | |
3adaee78 SO |
350 | /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */ |
351 | #define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10 | |
06394531 | 352 | unsigned long flags; |
fd65a3b5 | 353 | |
2251e9ff OU |
354 | /* VM-wide vCPU feature set */ |
355 | DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES); | |
356 | ||
5544750e MZ |
357 | /* MPIDR to vcpu index mapping, optional */ |
358 | struct kvm_mpidr_data *mpidr_data; | |
359 | ||
d7eec236 MZ |
360 | /* |
361 | * VM-wide PMU filter, implemented as a bitmap and big enough for | |
362 | * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+). | |
363 | */ | |
364 | unsigned long *pmu_filter; | |
46b18782 | 365 | struct arm_pmu *arm_pmu; |
23711a5e | 366 | |
583cda1b | 367 | cpumask_var_t supported_cpus; |
23711a5e | 368 | |
f12b54d7 MZ |
369 | /* Maximum number of counters for the guest */ |
370 | u8 nr_pmu_counters; | |
4d20debf | 371 | |
89176658 MZ |
372 | /* Iterator for idreg debugfs */ |
373 | u8 idreg_debugfs_iter; | |
374 | ||
05714cab RRA |
375 | /* Hypercall features firmware registers' descriptor */ |
376 | struct kvm_smccc_features smccc_feat; | |
fb88707d | 377 | struct maple_tree smccc_filter; |
a1ec5c70 | 378 | |
47334146 JZ |
379 | /* |
380 | * Emulated CPU ID registers per VM | |
381 | * (Op0, Op1, CRn, CRm, Op2) of the ID registers to be saved in it | |
382 | * is (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8. | |
383 | * | |
384 | * These emulated idregs are VM-wide, but accessed from the context of a vCPU. | |
385 | * Atomic access to multiple idregs is guarded by kvm_arch.config_lock. | |
386 | */ | |
387 | #define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id)) | |
47334146 JZ |
388 | #define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1) |
389 | u64 id_regs[KVM_ARM_ID_REG_NUM]; | |
390 | ||
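IDREG_IDX() packs an ID register's (CRm, Op2) pair into a dense index into id_regs[]. A worked example of that arithmetic, with a local IDX() macro standing in for the sys_reg_CRm()/sys_reg_Op2() extraction:

```c
#include <stdio.h>

/* ID registers are encoded as (3, 0, 0, CRm, Op2) with 1 <= CRm < 8 */
#define IDX(crm, op2)	((((crm) - 1) << 3) | (op2))

int main(void)
{
	printf("ID_AA64PFR0_EL1  (CRm=4, Op2=0) -> idx %d\n", IDX(4, 0));
	printf("ID_AA64MMFR1_EL1 (CRm=7, Op2=1) -> idx %d\n", IDX(7, 1));
	printf("array size = %d\n", IDX(7, 7) + 1);	/* KVM_ARM_ID_REG_NUM */
	return 0;
}
```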
b4043e7c SO |
391 | u64 midr_el1; |
392 | u64 revidr_el1; | |
393 | u64 aidr_el1; | |
2843cae2 SO |
394 | u64 ctr_el0; |
395 | ||
a0162020 | 396 | /* Masks for VNCR-backed and general EL2 sysregs */ |
888f0880 MZ |
397 | struct kvm_sysreg_masks *sysreg_masks; |
398 | ||
4ffa72ad MZ |
399 | /* Count the number of VNCR_EL2 currently mapped */ |
400 | atomic_t vncr_map_count; | |
401 | ||
a1ec5c70 | 402 | /* |
9d0c063a | 403 | * For an untrusted host VM, 'pkvm.handle' is used to look up
a1ec5c70 FT |
404 | * the associated pKVM instance in the hypervisor. |
405 | */ | |
9d0c063a | 406 | struct kvm_protected_vm pkvm; |
4f8d6632 MZ |
407 | }; |
408 | ||
4f8d6632 | 409 | struct kvm_vcpu_fault_info { |
0b12620f | 410 | u64 esr_el2; /* Hyp Syndrome Register */ |
4f8d6632 MZ |
411 | u64 far_el2; /* Hyp Fault Address Register */ |
412 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ | |
0067df41 | 413 | u64 disr_el1; /* Deferred [SError] Status Register */ |
4f8d6632 MZ |
414 | }; |
415 | ||
d8bd48e3 MZ |
416 | /* |
417 | * VNCR() just places the VNCR-capable registers in the enum after | |
418 | * __VNCR_START__, with their value (after correction) being the 8-byte offset | |
419 | * from the VNCR base. As we don't require the enum to be otherwise ordered, | |
420 | * we need the terrible hack below to ensure that we correctly size the | |
421 | * sys_regs array, no matter what. | |
422 | * | |
423 | * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful | |
424 | * treasure trove of bit hacks: | |
425 | * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax | |
426 | */ | |
427 | #define __MAX__(x,y) ((x) ^ (((x) ^ (y)) & -((x) < (y)))) | |
428 | #define VNCR(r) \ | |
429 | __before_##r, \ | |
430 | r = __VNCR_START__ + ((VNCR_ ## r) / 8), \ | |
431 | __after_##r = __MAX__(__before_##r - 1, r) | |
432 | ||
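A stand-alone illustration of the trick, using made-up VNCR_* offsets: each VNCR()'d entry is pinned at __VNCR_START__ + offset/8, while __after_##r keeps the enum's implicit counter from moving backwards, so the terminating enumerator always covers the highest offset used even when offsets are declared out of order.

```c
#include <stdio.h>

#define VNCR_FOO	0x000
#define VNCR_BAR	0x240	/* deliberately the largest offset */
#define VNCR_BAZ	0x010	/* declared after BAR, but smaller */

#define __MAX__(x, y)	((x) ^ (((x) ^ (y)) & -((x) < (y))))
#define VNCR(r)						\
	__before_##r,					\
	r = __VNCR_START__ + ((VNCR_##r) / 8),		\
	__after_##r = __MAX__(__before_##r - 1, r)

enum regs {
	SOME_REG,
	__VNCR_START__,
	VNCR(FOO),
	VNCR(BAR),
	VNCR(BAZ),	/* counter must not shrink below BAR here */
	NR_REGS
};

int main(void)
{
	/* prints FOO=1 BAR=73 BAZ=3 NR_REGS=74 */
	printf("FOO=%d BAR=%d BAZ=%d NR_REGS=%d\n", FOO, BAR, BAZ, NR_REGS);
	return 0;
}
```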
a0162020 MZ |
433 | #define MARKER(m) \ |
434 | m, __after_##m = m - 1 | |
435 | ||
9d8415d6 | 436 | enum vcpu_sysreg { |
8f7f4fe7 | 437 | __INVALID_SYSREG__, /* 0 is reserved as an invalid value */ |
9d8415d6 | 438 | MPIDR_EL1, /* MultiProcessor Affinity Register */ |
7af0c253 | 439 | CLIDR_EL1, /* Cache Level ID Register */ |
9d8415d6 | 440 | CSSELR_EL1, /* Cache Size Selection Register */ |
9d8415d6 MZ |
441 | TPIDR_EL0, /* Thread ID, User R/W */ |
442 | TPIDRRO_EL0, /* Thread ID, User R/O */ | |
443 | TPIDR_EL1, /* Thread ID, Privileged */ | |
9d8415d6 MZ |
444 | CNTKCTL_EL1, /* Timer Control Register (EL1) */ |
445 | PAR_EL1, /* Physical Address Register */ | |
9d8415d6 | 446 | MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ |
d42e2671 | 447 | OSLSR_EL1, /* OS Lock Status Register */ |
c773ae2b | 448 | DISR_EL1, /* Deferred Interrupt Status Register */ |
9d8415d6 | 449 | |
ab946834 SZ |
450 | /* Performance Monitors Registers */ |
451 | PMCR_EL0, /* Control Register */ | |
3965c3ce | 452 | PMSELR_EL0, /* Event Counter Selection Register */ |
051ff581 SZ |
453 | PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ |
454 | PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, | |
455 | PMCCNTR_EL0, /* Cycle Counter Register */ | |
9feb21ac SZ |
456 | PMEVTYPER0_EL0, /* Event Type Register (0-30) */ |
457 | PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30, | |
458 | PMCCFILTR_EL0, /* Cycle Count Filter Register */ | |
96b0eebc | 459 | PMCNTENSET_EL0, /* Count Enable Set Register */ |
9db52c78 | 460 | PMINTENSET_EL1, /* Interrupt Enable Set Register */ |
76d883c4 | 461 | PMOVSSET_EL0, /* Overflow Flag Status Set Register */ |
d692b8ad | 462 | PMUSERENR_EL0, /* User Enable Register */ |
ab946834 | 463 | |
384b40ca MR |
464 | /* Pointer Authentication Registers in a strict increasing order. */ |
465 | APIAKEYLO_EL1, | |
466 | APIAKEYHI_EL1, | |
467 | APIBKEYLO_EL1, | |
468 | APIBKEYHI_EL1, | |
469 | APDAKEYLO_EL1, | |
470 | APDAKEYHI_EL1, | |
471 | APDBKEYLO_EL1, | |
472 | APDBKEYHI_EL1, | |
473 | APGAKEYLO_EL1, | |
474 | APGAKEYHI_EL1, | |
475 | ||
e1f358b5 SP |
476 | /* Memory Tagging Extension registers */ |
477 | RGSR_EL1, /* Random Allocation Tag Seed Register */ | |
478 | GCR_EL1, /* Tag Control Register */ | |
e1f358b5 SP |
479 | TFSRE0_EL1, /* Tag Fault Status Register (EL0) */ |
480 | ||
b86c9bea JG |
481 | POR_EL0, /* Permission Overlay Register 0 (EL0) */ |
482 | ||
b5568894 MZ |
483 | /* FP/SIMD/SVE */ |
484 | SVCR, | |
7d9c1ed6 | 485 | FPMR, |
b5568894 | 486 | |
5305cc2c | 487 | /* 32bit specific registers. */ |
9d8415d6 MZ |
488 | DACR32_EL2, /* Domain Access Control Register */ |
489 | IFSR32_EL2, /* Instruction Fault Status Register */ | |
490 | FPEXC32_EL2, /* Floating-Point Exception Control Register */ | |
491 | DBGVCR32_EL2, /* Debug Vector Catch Register */ | |
492 | ||
5305cc2c | 493 | /* EL2 registers */ |
5305cc2c MZ |
494 | SCTLR_EL2, /* System Control Register (EL2) */ |
495 | ACTLR_EL2, /* Auxiliary Control Register (EL2) */ | |
5305cc2c | 496 | CPTR_EL2, /* Architectural Feature Trap Register (EL2) */ |
5305cc2c | 497 | HACR_EL2, /* Hypervisor Auxiliary Control Register */ |
b3d29a82 | 498 | ZCR_EL2, /* SVE Control Register (EL2) */ |
5305cc2c MZ |
499 | TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */ |
500 | TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */ | |
501 | TCR_EL2, /* Translation Control Register (EL2) */ | |
5f8d5a15 MZ |
502 | PIRE0_EL2, /* Permission Indirection Register 0 (EL2) */ |
503 | PIR_EL2, /* Permission Indirection Register 1 (EL2) */ | |
5970e990 | 504 | POR_EL2, /* Permission Overlay Register 2 (EL2) */ |
5305cc2c MZ |
505 | SPSR_EL2, /* EL2 saved program status register */ |
506 | ELR_EL2, /* EL2 exception link register */ | |
507 | AFSR0_EL2, /* Auxiliary Fault Status Register 0 (EL2) */ | |
508 | AFSR1_EL2, /* Auxiliary Fault Status Register 1 (EL2) */ | |
509 | ESR_EL2, /* Exception Syndrome Register (EL2) */ | |
510 | FAR_EL2, /* Fault Address Register (EL2) */ | |
511 | HPFAR_EL2, /* Hypervisor IPA Fault Address Register */ | |
512 | MAIR_EL2, /* Memory Attribute Indirection Register (EL2) */ | |
513 | AMAIR_EL2, /* Auxiliary Memory Attribute Indirection Register (EL2) */ | |
514 | VBAR_EL2, /* Vector Base Address Register (EL2) */ | |
515 | RVBAR_EL2, /* Reset Vector Base Address Register */ | |
516 | CONTEXTIDR_EL2, /* Context ID Register (EL2) */ | |
5305cc2c | 517 | SP_EL2, /* EL2 Stack Pointer */ |
81dc9504 MZ |
518 | CNTHP_CTL_EL2, |
519 | CNTHP_CVAL_EL2, | |
520 | CNTHV_CTL_EL2, | |
521 | CNTHV_CVAL_EL2, | |
5305cc2c | 522 | |
a0162020 MZ |
523 | /* Anything from this can be RES0/RES1 sanitised */ |
524 | MARKER(__SANITISED_REG_START__), | |
69c19e04 | 525 | TCR2_EL2, /* Extended Translation Control Register (EL2) */ |
eb609638 | 526 | MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */ |
d1e37a50 | 527 | CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */ |
a0162020 MZ |
528 | |
529 | /* Any VNCR-capable reg goes after this point */ | |
530 | MARKER(__VNCR_START__), | |
d8bd48e3 MZ |
531 | |
532 | VNCR(SCTLR_EL1),/* System Control Register */ | |
533 | VNCR(ACTLR_EL1),/* Auxiliary Control Register */ | |
534 | VNCR(CPACR_EL1),/* Coprocessor Access Control */ | |
535 | VNCR(ZCR_EL1), /* SVE Control */ | |
536 | VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */ | |
537 | VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */ | |
538 | VNCR(TCR_EL1), /* Translation Control Register */ | |
539 | VNCR(TCR2_EL1), /* Extended Translation Control Register */ | |
540 | VNCR(ESR_EL1), /* Exception Syndrome Register */ | |
541 | VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */ | |
542 | VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */ | |
543 | VNCR(FAR_EL1), /* Fault Address Register */ | |
544 | VNCR(MAIR_EL1), /* Memory Attribute Indirection Register */ | |
545 | VNCR(VBAR_EL1), /* Vector Base Address Register */ | |
546 | VNCR(CONTEXTIDR_EL1), /* Context ID Register */ | |
547 | VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */ | |
548 | VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */ | |
549 | VNCR(ELR_EL1), | |
550 | VNCR(SP_EL1), | |
551 | VNCR(SPSR_EL1), | |
552 | VNCR(TFSR_EL1), /* Tag Fault Status Register (EL1) */ | |
553 | VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */ | |
554 | VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */ | |
555 | VNCR(HCR_EL2), /* Hypervisor Configuration Register */ | |
556 | VNCR(HSTR_EL2), /* Hypervisor System Trap Register */ | |
557 | VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */ | |
558 | VNCR(VTCR_EL2), /* Virtualization Translation Control Register */ | |
559 | VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */ | |
560 | VNCR(HCRX_EL2), /* Extended Hypervisor Configuration Register */ | |
561 | ||
562 | /* Permission Indirection Extension registers */ | |
563 | VNCR(PIR_EL1), /* Permission Indirection Register 1 (EL1) */ | |
564 | VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */ | |
565 | ||
b86c9bea JG |
566 | VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */ |
567 | ||
d8bd48e3 MZ |
568 | VNCR(HFGRTR_EL2), |
569 | VNCR(HFGWTR_EL2), | |
570 | VNCR(HFGITR_EL2), | |
571 | VNCR(HDFGRTR_EL2), | |
572 | VNCR(HDFGWTR_EL2), | |
d016264d | 573 | VNCR(HAFGRTR_EL2), |
df56f1cc MZ |
574 | VNCR(HFGRTR2_EL2), |
575 | VNCR(HFGWTR2_EL2), | |
576 | VNCR(HFGITR2_EL2), | |
577 | VNCR(HDFGRTR2_EL2), | |
578 | VNCR(HDFGWTR2_EL2), | |
d8bd48e3 | 579 | |
6fb75733 MZ |
580 | VNCR(VNCR_EL2), |
581 | ||
d8bd48e3 MZ |
582 | VNCR(CNTVOFF_EL2), |
583 | VNCR(CNTV_CVAL_EL0), | |
584 | VNCR(CNTV_CTL_EL0), | |
585 | VNCR(CNTP_CVAL_EL0), | |
586 | VNCR(CNTP_CTL_EL0), | |
587 | ||
182f1596 MZ |
588 | VNCR(ICH_LR0_EL2), |
589 | VNCR(ICH_LR1_EL2), | |
590 | VNCR(ICH_LR2_EL2), | |
591 | VNCR(ICH_LR3_EL2), | |
592 | VNCR(ICH_LR4_EL2), | |
593 | VNCR(ICH_LR5_EL2), | |
594 | VNCR(ICH_LR6_EL2), | |
595 | VNCR(ICH_LR7_EL2), | |
596 | VNCR(ICH_LR8_EL2), | |
597 | VNCR(ICH_LR9_EL2), | |
598 | VNCR(ICH_LR10_EL2), | |
599 | VNCR(ICH_LR11_EL2), | |
600 | VNCR(ICH_LR12_EL2), | |
601 | VNCR(ICH_LR13_EL2), | |
602 | VNCR(ICH_LR14_EL2), | |
603 | VNCR(ICH_LR15_EL2), | |
604 | ||
605 | VNCR(ICH_AP0R0_EL2), | |
606 | VNCR(ICH_AP0R1_EL2), | |
607 | VNCR(ICH_AP0R2_EL2), | |
608 | VNCR(ICH_AP0R3_EL2), | |
609 | VNCR(ICH_AP1R0_EL2), | |
610 | VNCR(ICH_AP1R1_EL2), | |
611 | VNCR(ICH_AP1R2_EL2), | |
612 | VNCR(ICH_AP1R3_EL2), | |
9f5deace | 613 | VNCR(ICH_HCR_EL2), |
182f1596 | 614 | VNCR(ICH_VMCR_EL2), |
9f5deace | 615 | |
9d8415d6 MZ |
616 | NR_SYS_REGS /* Nothing after this line! */ |
617 | }; | |
618 | ||
888f0880 MZ |
619 | struct kvm_sysreg_masks { |
620 | struct { | |
621 | u64 res0; | |
622 | u64 res1; | |
a0162020 | 623 | } mask[NR_SYS_REGS - __SANITISED_REG_START__]; |
888f0880 MZ |
624 | }; |
625 | ||
1b8570be MZ |
626 | struct fgt_masks { |
627 | const char *str; | |
628 | u64 mask; | |
629 | u64 nmask; | |
630 | u64 res0; | |
631 | }; | |
632 | ||
633 | extern struct fgt_masks hfgrtr_masks; | |
634 | extern struct fgt_masks hfgwtr_masks; | |
635 | extern struct fgt_masks hfgitr_masks; | |
636 | extern struct fgt_masks hdfgrtr_masks; | |
637 | extern struct fgt_masks hdfgwtr_masks; | |
638 | extern struct fgt_masks hafgrtr_masks; | |
4bc0fe08 MZ |
639 | extern struct fgt_masks hfgrtr2_masks; |
640 | extern struct fgt_masks hfgwtr2_masks; | |
641 | extern struct fgt_masks hfgitr2_masks; | |
642 | extern struct fgt_masks hdfgrtr2_masks; | |
643 | extern struct fgt_masks hdfgwtr2_masks; | |
1b8570be | 644 | |
311ba55a MZ |
645 | extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks); |
646 | extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks); | |
647 | extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks); | |
648 | extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks); | |
649 | extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks); | |
650 | extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks); | |
4bc0fe08 MZ |
651 | extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks); |
652 | extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks); | |
653 | extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks); | |
654 | extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks); | |
655 | extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks); | |
311ba55a | 656 | |
4f8d6632 | 657 | struct kvm_cpu_context { |
e47c2055 MZ |
658 | struct user_pt_regs regs; /* sp = sp_el0 */ |
659 | ||
fd85b667 MZ |
660 | u64 spsr_abt; |
661 | u64 spsr_und; | |
662 | u64 spsr_irq; | |
663 | u64 spsr_fiq; | |
e47c2055 MZ |
664 | |
665 | struct user_fpsimd_state fp_regs; | |
666 | ||
5f7e02ae | 667 | u64 sys_regs[NR_SYS_REGS]; |
c97e166e JM |
668 | |
669 | struct kvm_vcpu *__hyp_running_vcpu; | |
d8bd48e3 MZ |
670 | |
671 | /* This pointer has to be 4kB aligned. */ | |
672 | u64 *vncr_array; | |
4f8d6632 MZ |
673 | }; |
674 | ||
66d5b53e FT |
675 | struct cpu_sve_state { |
676 | __u64 zcr_el1; | |
677 | ||
678 | /* | |
679 | * Ordering is important since __sve_save_state/__sve_restore_state | |
680 | * relies on it. | |
681 | */ | |
682 | __u32 fpsr; | |
683 | __u32 fpcr; | |
684 | ||
685 | /* Must be SVE_VQ_BYTES (128 bit) aligned. */ | |
686 | __u8 sve_regs[]; | |
687 | }; | |
688 | ||
87f842c6 MZ |
689 | /* |
690 | * This structure is instantiated on a per-CPU basis, and contains | |
691 | * data that is: | |
692 | * | |
693 | * - tied to a single physical CPU, and | |
694 | * - either has a lifetime that does not extend past vcpu_put() | |
695 | * - or is an invariant for the lifetime of the system | |
696 | * | |
697 | * Use host_data_ptr(field) as a way to access a pointer to such a | |
698 | * field. | |
699 | */ | |
630a1685 | 700 | struct kvm_host_data { |
d381e533 OU |
701 | #define KVM_HOST_DATA_FLAG_HAS_SPE 0 |
702 | #define KVM_HOST_DATA_FLAG_HAS_TRBE 1 | |
a665e3bc JC |
703 | #define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4 |
704 | #define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5 | |
bd914a98 | 705 | #define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6 |
2a359e07 | 706 | #define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7 |
38131c02 OU |
707 | unsigned long flags; |
708 | ||
630a1685 | 709 | struct kvm_cpu_context host_ctxt; |
66d5b53e | 710 | |
1696fc21 | 711 | /* |
8eca7f6d | 712 | * Hyp VA. |
1696fc21 FT |
713 | * sve_state is only used in pKVM and if system_supports_sve(). |
714 | */ | |
8eca7f6d MR |
715 | struct cpu_sve_state *sve_state; |
716 | ||
717 | /* Used by pKVM only. */ | |
718 | u64 fpmr; | |
ef3be860 | 719 | |
5294afdb MZ |
720 | /* Ownership of the FP regs */ |
721 | enum { | |
722 | FP_STATE_FREE, | |
723 | FP_STATE_HOST_OWNED, | |
724 | FP_STATE_GUEST_OWNED, | |
725 | } fp_owner; | |
726 | ||
6db55734 MZ |
727 | /* |
728 | * host_debug_state contains the host registers which are | |
729 | * saved and restored during world switches. | |
730 | */ | |
2417218f | 731 | struct { |
6db55734 MZ |
732 | /* {Break,watch}point registers */ |
733 | struct kvm_guest_debug_arch regs; | |
734 | /* Statistical profiling extension */ | |
735 | u64 pmscr_el1; | |
736 | /* Self-hosted trace */ | |
737 | u64 trfcr_el1; | |
4bacd723 MZ |
738 | /* Values of trap registers for the host before guest entry. */ |
739 | u64 mdcr_el2; | |
6db55734 | 740 | } host_debug_state; |
2417218f | 741 | |
a665e3bc JC |
742 | /* Guest trace filter value */ |
743 | u64 trfcr_while_in_guest; | |
744 | ||
2417218f OU |
745 | /* Number of programmable event counters (PMCR_EL0.N) for this CPU */ |
746 | unsigned int nr_event_counters; | |
8c02c2bb OU |
747 | |
748 | /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */ | |
749 | unsigned int debug_brps; | |
750 | unsigned int debug_wrps; | |
630a1685 AM |
751 | }; |
752 | ||
ff367fe4 DB |
753 | struct kvm_host_psci_config { |
754 | /* PSCI version used by host. */ | |
755 | u32 version; | |
12bdce4f | 756 | u32 smccc_version; |
ff367fe4 DB |
757 | |
758 | /* Function IDs used by host if version is v0.1. */ | |
759 | struct psci_0_1_function_ids function_ids_0_1; | |
760 | ||
767c973f MZ |
761 | bool psci_0_1_cpu_suspend_implemented; |
762 | bool psci_0_1_cpu_on_implemented; | |
763 | bool psci_0_1_cpu_off_implemented; | |
764 | bool psci_0_1_migrate_implemented; | |
ff367fe4 DB |
765 | }; |
766 | ||
767 | extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config); | |
768 | #define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config) | |
769 | ||
61fe0c37 DB |
770 | extern s64 kvm_nvhe_sym(hyp_physvirt_offset); |
771 | #define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset) | |
772 | ||
773 | extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS]; | |
774 | #define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map) | |
775 | ||
358b28f0 MZ |
776 | struct vcpu_reset_state { |
777 | unsigned long pc; | |
778 | unsigned long r0; | |
779 | bool be; | |
780 | bool reset; | |
781 | }; | |
782 | ||
ea8d3cf4 MZ |
783 | struct vncr_tlb; |
784 | ||
4f8d6632 MZ |
785 | struct kvm_vcpu_arch { |
786 | struct kvm_cpu_context ctxt; | |
0033cd93 | 787 | |
baa85152 MB |
788 | /* |
789 | * Guest floating point state | |
790 | * | |
791 | * The architecture has two main floating point extensions, | |
792 | * the original FPSIMD and SVE. These have overlapping | |
793 | * register views, with the FPSIMD V registers occupying the | |
794 | * low 128 bits of the SVE Z registers. When the core | |
795 | * floating point code saves the register state of a task it | |
796 | * records which view it saved in fp_type. | |
797 | */ | |
b43b5dd9 | 798 | void *sve_state; |
baa85152 | 799 | enum fp_type fp_type; |
b43b5dd9 | 800 | unsigned int sve_max_vl; |
4f8d6632 | 801 | |
a0e50aa3 CD |
802 | /* Stage 2 paging state used by the hardware on next switch */ |
803 | struct kvm_s2_mmu *hw_mmu; | |
804 | ||
1460b4b2 | 805 | /* Values of trap registers for the guest. */ |
4f8d6632 | 806 | u64 hcr_el2; |
84de212d | 807 | u64 hcrx_el2; |
d6c850dd | 808 | u64 mdcr_el2; |
4f8d6632 MZ |
809 | |
810 | /* Exception Information */ | |
811 | struct kvm_vcpu_fault_info fault; | |
812 | ||
690bacb8 | 813 | /* Configuration flags, set once and for all before the vcpu can run */ |
54ddda91 | 814 | u8 cflags; |
690bacb8 MZ |
815 | |
816 | /* Input flags to the hypervisor code, potentially cleared after use */ | |
54ddda91 | 817 | u8 iflags; |
690bacb8 MZ |
818 | |
819 | /* State flags for kernel bookkeeping, unused by the hypervisor code */ | |
54ddda91 | 820 | u8 sflags; |
690bacb8 | 821 | |
0fa4a313 MZ |
822 | /* |
823 | * Don't run the guest (internal implementation need). | |
824 | * | |
825 | * Contrary to the flags above, this is set/cleared outside of | |
826 | * a vcpu context, and thus cannot be mixed with the flags | |
827 | * themselves (or the flag accesses need to be made atomic). | |
828 | */ | |
829 | bool pause; | |
0c557ed4 | 830 | |
84e690bf AB |
831 | /* |
832 | * We maintain more than a single set of debug registers to support | |
833 | * debugging the guest from the host and to maintain separate host and | |
834 | * guest state during world switches. vcpu_debug_state are the debug | |
6db55734 MZ |
835 | * registers of the vcpu as the guest sees them. |
836 | * | |
837 | * external_debug_state contains the debug values we want to use when | |
838 | * debugging the guest. This is set via the KVM_SET_GUEST_DEBUG ioctl. | |
84e690bf | 839 | */ |
84e690bf | 840 | struct kvm_guest_debug_arch vcpu_debug_state; |
834bf887 | 841 | struct kvm_guest_debug_arch external_debug_state; |
4ad3a0b8 | 842 | u64 external_mdscr_el1; |
84e690bf | 843 | |
cd9b1010 OU |
844 | enum { |
845 | VCPU_DEBUG_FREE, | |
846 | VCPU_DEBUG_HOST_OWNED, | |
847 | VCPU_DEBUG_GUEST_OWNED, | |
848 | } debug_owner; | |
849 | ||
4f8d6632 MZ |
850 | /* VGIC state */ |
851 | struct vgic_cpu vgic_cpu; | |
852 | struct arch_timer_cpu timer_cpu; | |
04fe4726 | 853 | struct kvm_pmu pmu; |
4f8d6632 | 854 | |
b171f9bb OU |
855 | /* vcpu power state */ |
856 | struct kvm_mp_state mp_state; | |
0acc7239 | 857 | spinlock_t mp_state_lock; |
4f8d6632 | 858 | |
4f8d6632 MZ |
859 | /* Cache some mmu pages needed inside spinlock regions */ |
860 | struct kvm_mmu_memory_cache mmu_page_cache; | |
861 | ||
d0bd3e65 QP |
862 | /* Pages to top-up the pKVM/EL2 guest pool */ |
863 | struct kvm_hyp_memcache pkvm_memcache; | |
864 | ||
4715c14b JM |
865 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ |
866 | u64 vsesr_el2; | |
d47533da | 867 | |
358b28f0 MZ |
868 | /* Additional reset state */ |
869 | struct vcpu_reset_state reset_state; | |
870 | ||
8564d637 SP |
871 | /* Guest PV state */ |
872 | struct { | |
8564d637 SP |
873 | u64 last_steal; |
874 | gpa_t base; | |
875 | } steal; | |
7af0c253 AO |
876 | |
877 | /* Per-vcpu CCSIDR override or NULL */ | |
878 | u32 *ccsidr; | |
ea8d3cf4 MZ |
879 | |
880 | /* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */ | |
881 | struct vncr_tlb *vncr_tlb; | |
4f8d6632 MZ |
882 | }; |
883 | ||
e87abb73 MZ |
884 | /* |
885 | * Each 'flag' is composed of a comma-separated triplet: | |
886 | * | |
887 | * - the flag-set it belongs to in the vcpu->arch structure | |
888 | * - the value for that flag | |
889 | * - the mask for that flag | |
890 | * | |
891 | * __vcpu_single_flag() builds such a triplet for a single-bit flag. | |
892 | * unpack_vcpu_flag() extracts the flag value from the triplet for | |
893 | * direct use outside of the flag accessors. | |
894 | */ | |
895 | #define __vcpu_single_flag(_set, _f) _set, (_f), (_f) | |
896 | ||
897 | #define __unpack_flag(_set, _f, _m) _f | |
898 | #define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__) | |
899 | ||
5a3984f4 MZ |
900 | #define __build_check_flag(v, flagset, f, m) \ |
901 | do { \ | |
902 | typeof(v->arch.flagset) *_fset; \ | |
903 | \ | |
904 | /* Check that the flags fit in the mask */ \ | |
905 | BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \ | |
906 | /* Check that the flags fit in the type */ \ | |
907 | BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m)); \ | |
908 | } while (0) | |
909 | ||
e87abb73 MZ |
910 | #define __vcpu_get_flag(v, flagset, f, m) \ |
911 | ({ \ | |
5a3984f4 MZ |
912 | __build_check_flag(v, flagset, f, m); \ |
913 | \ | |
35dcb3ac | 914 | READ_ONCE(v->arch.flagset) & (m); \ |
e87abb73 MZ |
915 | }) |
916 | ||
35dcb3ac MZ |
917 | /* |
918 | * Note that the set/clear accessors must be preempt-safe in order to | |
919 | * avoid nesting them with load/put which also manipulate flags... | |
920 | */ | |
921 | #ifdef __KVM_NVHE_HYPERVISOR__ | |
922 | /* the nVHE hypervisor is always non-preemptible */ | |
923 | #define __vcpu_flags_preempt_disable() | |
924 | #define __vcpu_flags_preempt_enable() | |
925 | #else | |
926 | #define __vcpu_flags_preempt_disable() preempt_disable() | |
927 | #define __vcpu_flags_preempt_enable() preempt_enable() | |
928 | #endif | |
929 | ||
e87abb73 MZ |
930 | #define __vcpu_set_flag(v, flagset, f, m) \ |
931 | do { \ | |
932 | typeof(v->arch.flagset) *fset; \ | |
933 | \ | |
5a3984f4 MZ |
934 | __build_check_flag(v, flagset, f, m); \ |
935 | \ | |
e87abb73 | 936 | fset = &v->arch.flagset; \ |
35dcb3ac | 937 | __vcpu_flags_preempt_disable(); \ |
e87abb73 MZ |
938 | if (HWEIGHT(m) > 1) \ |
939 | *fset &= ~(m); \ | |
940 | *fset |= (f); \ | |
35dcb3ac | 941 | __vcpu_flags_preempt_enable(); \ |
e87abb73 MZ |
942 | } while (0) |
943 | ||
944 | #define __vcpu_clear_flag(v, flagset, f, m) \ | |
945 | do { \ | |
946 | typeof(v->arch.flagset) *fset; \ | |
947 | \ | |
5a3984f4 MZ |
948 | __build_check_flag(v, flagset, f, m); \ |
949 | \ | |
e87abb73 | 950 | fset = &v->arch.flagset; \ |
35dcb3ac | 951 | __vcpu_flags_preempt_disable(); \ |
e87abb73 | 952 | *fset &= ~(m); \ |
35dcb3ac | 953 | __vcpu_flags_preempt_enable(); \ |
e87abb73 MZ |
954 | } while (0) |
955 | ||
956 | #define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__) | |
957 | #define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__) | |
958 | #define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__) | |
959 | ||
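A toy model of the triplet expansion, assuming a cut-down vcpu with a single u8 flag-set; the accessors below are simplified stand-ins for the real ones (no build-time checks, no preemption guards), meant only to show how the (set, value, mask) triplet is spliced into the varargs macros.

```c
#include <stdio.h>

struct toy_vcpu { struct { unsigned char sflags; } arch; };

/* a flag expands to the triplet: flag-set name, value, mask */
#define __vcpu_single_flag(_set, _f)	_set, (_f), (_f)

#define IN_WFIT		__vcpu_single_flag(sflags, 1U << 1)

#define __vcpu_set_flag(v, flagset, f, m)	((v)->arch.flagset |= (f))
#define __vcpu_get_flag(v, flagset, f, m)	((v)->arch.flagset & (m))

#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	vcpu_set_flag(&vcpu, IN_WFIT);	/* the triplet fills the three args */
	printf("IN_WFIT set? %s\n",
	       vcpu_get_flag(&vcpu, IN_WFIT) ? "yes" : "no");
	return 0;
}
```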
41d6028e FT |
960 | /* KVM_ARM_VCPU_INIT completed */ |
961 | #define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0)) | |
4c0680d3 MZ |
962 | /* SVE config completed */ |
963 | #define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1)) | |
1eab1154 FT |
964 | /* pKVM VCPU setup completed */ |
965 | #define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2)) | |
4c0680d3 | 966 | |
699bb2e0 MZ |
967 | /* Exception pending */ |
968 | #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0)) | |
969 | /* | |
970 | * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't | |
971 | * be set together with an exception... | |
972 | */ | |
973 | #define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1)) | |
974 | /* Target EL/MODE (not a single flag, but let's abuse the macro) */ | |
975 | #define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1)) | |
976 | ||
977 | /* Helpers to encode exceptions with minimum fuss */ | |
978 | #define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK) | |
979 | #define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL) | |
980 | #define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL | |
981 | ||
982 | /* | |
983 | * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following | |
984 | * values: | |
985 | * | |
986 | * For AArch32 EL1: | |
987 | */ | |
988 | #define EXCEPT_AA32_UND __vcpu_except_flags(0) | |
989 | #define EXCEPT_AA32_IABT __vcpu_except_flags(1) | |
990 | #define EXCEPT_AA32_DABT __vcpu_except_flags(2) | |
991 | /* For AArch64: */ | |
992 | #define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0) | |
993 | #define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1) | |
994 | #define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2) | |
995 | #define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3) | |
47f3a2fc | 996 | /* For AArch64 with NV: */ |
699bb2e0 MZ |
997 | #define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4) |
998 | #define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5) | |
999 | #define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6) | |
1000 | #define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7) | |
e87abb73 | 1001 | |
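The helpers above place the exception class in iflags bits [3:1], so __vcpu_except_flags(n) is simply n shifted by __builtin_ctzl() of EXCEPT_MASK. A quick stand-alone check of that arithmetic (GENMASK() is redefined locally for the sketch):

```c
#include <stdio.h>

#define GENMASK(h, l)	(((1UL << ((h) - (l) + 1)) - 1) << (l))

int main(void)
{
	unsigned long mask = GENMASK(3, 1);		/* EXCEPT_MASK bits */
	unsigned int shift = __builtin_ctzl(mask);

	/* EXCEPT_AA64_EL2_IRQ is __vcpu_except_flags(5) */
	printf("mask=%#lx shift=%u EL2_IRQ value=%#lx\n",
	       mask, shift, 5UL << shift);
	return 0;
}
```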
aff3ccd7 | 1002 | /* Physical CPU not in supported_cpus */ |
d381e533 | 1003 | #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0)) |
eebc538d | 1004 | /* WFIT instruction trapped */ |
d381e533 | 1005 | #define IN_WFIT __vcpu_single_flag(sflags, BIT(1)) |
30b6ab45 | 1006 | /* vcpu system registers loaded on physical CPU */ |
d381e533 | 1007 | #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(2)) |
2ca3f03b MZ |
1008 | /* Software step state is Active-pending for external debug */ |
1009 | #define HOST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(3)) | |
1010 | /* Software step state is Active pending for guest debug */ | |
1011 | #define GUEST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(4)) | |
0c2f9acf | 1012 | /* PMUSERENR for the guest EL0 is on physical CPU */ |
2ca3f03b | 1013 | #define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5)) |
b321c31c | 1014 | /* WFI instruction trapped */ |
2ca3f03b | 1015 | #define IN_WFI __vcpu_single_flag(sflags, BIT(6)) |
93078ae6 OU |
1016 | /* KVM is currently emulating a nested ERET */ |
1017 | #define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7)) | |
370531d1 | 1018 | |
0affa37f | 1019 | |
b43b5dd9 | 1020 | /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ |
985d3a1b MZ |
1021 | #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ |
1022 | sve_ffr_offset((vcpu)->arch.sve_max_vl)) | |
b43b5dd9 | 1023 | |
468f3477 | 1024 | #define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl) |
b43b5dd9 | 1025 | |
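vcpu_sve_max_vq() converts the vector length in bytes into a count of 128-bit vector quanta. The sketch below mirrors that VL/16 arithmetic; vq_from_vl() is an illustrative stand-in for sve_vq_from_vl().

```c
#include <stdio.h>

#define VQ_BYTES	16	/* one SVE vector quantum is 128 bits */

static unsigned int vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / VQ_BYTES;
}

int main(void)
{
	for (unsigned int vl = 16; vl <= 256; vl *= 2)
		printf("VL = %3u bytes -> VQ = %2u\n", vl, vq_from_vl(vl));
	return 0;
}
```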
069da3ff OU |
1026 | #define vcpu_sve_zcr_elx(vcpu) \ |
1027 | (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1) | |
1028 | ||
5db1bef9 | 1029 | #define sve_state_size_from_vl(sve_max_vl) ({ \ |
e1c9c983 | 1030 | size_t __size_ret; \ |
5db1bef9 | 1031 | unsigned int __vq; \ |
e1c9c983 | 1032 | \ |
5db1bef9 | 1033 | if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \ |
e1c9c983 DM |
1034 | __size_ret = 0; \ |
1035 | } else { \ | |
5db1bef9 FT |
1036 | __vq = sve_vq_from_vl(sve_max_vl); \ |
1037 | __size_ret = SVE_SIG_REGS_SIZE(__vq); \ | |
e1c9c983 DM |
1038 | } \ |
1039 | \ | |
1040 | __size_ret; \ | |
1041 | }) | |
1042 | ||
5db1bef9 FT |
1043 | #define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl) |
1044 | ||
892fd259 MZ |
1045 | #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ |
1046 | KVM_GUESTDBG_USE_SW_BP | \ | |
1047 | KVM_GUESTDBG_USE_HW | \ | |
1048 | KVM_GUESTDBG_SINGLESTEP) | |
e650b64f | 1049 | |
41d6028e FT |
1050 | #define kvm_has_sve(kvm) (system_supports_sve() && \ |
1051 | test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags)) | |
1052 | ||
1053 | #ifdef __KVM_NVHE_HYPERVISOR__ | |
1054 | #define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm)) | |
1055 | #else | |
1056 | #define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm) | |
1057 | #endif | |
fa89d31c | 1058 | |
bf4086b1 MZ |
1059 | #ifdef CONFIG_ARM64_PTR_AUTH |
1060 | #define vcpu_has_ptrauth(vcpu) \ | |
1061 | ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ | |
1062 | cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ | |
c5c17635 FT |
1063 | (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \ |
1064 | vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))) | |
bf4086b1 MZ |
1065 | #else |
1066 | #define vcpu_has_ptrauth(vcpu) false | |
1067 | #endif | |
b890d75c | 1068 | |
583cda1b | 1069 | #define vcpu_on_unsupported_cpu(vcpu) \ |
aff3ccd7 | 1070 | vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU) |
583cda1b AE |
1071 | |
1072 | #define vcpu_set_on_unsupported_cpu(vcpu) \ | |
aff3ccd7 | 1073 | vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU) |
583cda1b AE |
1074 | |
1075 | #define vcpu_clear_on_unsupported_cpu(vcpu) \ | |
aff3ccd7 | 1076 | vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU) |
583cda1b | 1077 | |
e47c2055 | 1078 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs) |
8d404c4c CD |
1079 | |
1080 | /* | |
1b422dd7 MZ |
1081 | * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the |
1082 | * memory backed version of a register, and not the one most recently | |
1083 | * accessed by a running VCPU. For example, for userspace access or | |
1084 | * for system registers that are never context switched, but only | |
1085 | * emulated. | |
d8bd48e3 MZ |
1086 | * |
1087 | * Don't bother with VNCR-based accesses in the nVHE code, it has no | |
1088 | * business dealing with NV. | |
8d404c4c | 1089 | */ |
1b06b99f | 1090 | static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r) |
d8bd48e3 MZ |
1091 | { |
1092 | #if !defined (__KVM_NVHE_HYPERVISOR__) | |
1093 | if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) && | |
1094 | r >= __VNCR_START__ && ctxt->vncr_array)) | |
1095 | return &ctxt->vncr_array[r - __VNCR_START__]; | |
1096 | #endif | |
1097 | return (u64 *)&ctxt->sys_regs[r]; | |
1098 | } | |
1b422dd7 | 1099 | |
1b06b99f MZ |
1100 | #define __ctxt_sys_reg(c,r) \ |
1101 | ({ \ | |
1102 | BUILD_BUG_ON(__builtin_constant_p(r) && \ | |
1103 | (r) >= NR_SYS_REGS); \ | |
1104 | ___ctxt_sys_reg(c, r); \ | |
1105 | }) | |
1106 | ||
1b422dd7 MZ |
1107 | #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r)) |
1108 | ||
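A toy model of the redirection done by ___ctxt_sys_reg(): indices at or above __VNCR_START__ resolve into a separate VNCR-backed page when one is attached, and everything else into the ordinary sys_regs[] array. All enum names and array sizes below are made up for illustration.

```c
#include <stdint.h>
#include <stdio.h>

enum { R_SCTLR_EL1, R_TCR_EL1, __VNCR_START__, V_HCR_EL2, V_VTTBR_EL2, NR_REGS };

struct ctx {
	uint64_t sys_regs[NR_REGS];
	uint64_t *vncr_array;		/* may be NULL when NV is not in use */
};

static uint64_t *reg_ptr(struct ctx *c, int r)
{
	if (r >= __VNCR_START__ && c->vncr_array)
		return &c->vncr_array[r - __VNCR_START__];
	return &c->sys_regs[r];
}

int main(void)
{
	static uint64_t vncr_page[NR_REGS - __VNCR_START__];
	struct ctx c = { .vncr_array = vncr_page };

	*reg_ptr(&c, R_TCR_EL1) = 0x1;	/* backed by sys_regs[] */
	*reg_ptr(&c, V_HCR_EL2) = 0x2;	/* backed by the VNCR page */
	printf("TCR=%llu HCR=%llu\n",
	       (unsigned long long)c.sys_regs[R_TCR_EL1],
	       (unsigned long long)vncr_page[V_HCR_EL2 - __VNCR_START__]);
	return 0;
}
```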
a0162020 | 1109 | u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64); |
6678791e MZ |
1110 | |
1111 | #define __vcpu_assign_sys_reg(v, r, val) \ | |
1112 | do { \ | |
1113 | const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \ | |
1114 | u64 __v = (val); \ | |
1115 | if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \ | |
1116 | __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \ | |
1117 | \ | |
1118 | ctxt_sys_reg(ctxt, (r)) = __v; \ | |
1119 | } while (0) | |
1120 | ||
8800b7c4 MZ |
1121 | #define __vcpu_rmw_sys_reg(v, r, op, val) \ |
1122 | do { \ | |
1123 | const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \ | |
1124 | u64 __v = ctxt_sys_reg(ctxt, (r)); \ | |
1125 | __v op (val); \ | |
1126 | if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \ | |
1127 | __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \ | |
1128 | \ | |
1129 | ctxt_sys_reg(ctxt, (r)) = __v; \ | |
1130 | } while (0) | |
1131 | ||
888f0880 | 1132 | #define __vcpu_sys_reg(v,r) \ |
b5fa1f91 | 1133 | ({ \ |
888f0880 | 1134 | const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \ |
b5fa1f91 | 1135 | u64 __v = ctxt_sys_reg(ctxt, (r)); \ |
a0162020 | 1136 | if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \ |
b5fa1f91 MZ |
1137 | __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \ |
1138 | __v; \ | |
1139 | }) | |
8d404c4c | 1140 | |
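__vcpu_sys_reg() and the assign/rmw forms above funnel sanitised EL2 register values through kvm_vcpu_apply_reg_masks(), which conceptually forces RES0 bits to 0 and RES1 bits to 1. A minimal sketch of that masking, with made-up mask values:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t apply_masks(uint64_t val, uint64_t res0, uint64_t res1)
{
	return (val & ~res0) | res1;	/* clear RES0 bits, set RES1 bits */
}

int main(void)
{
	uint64_t guest_write = 0xffff0000ffff0000ULL;
	uint64_t res0 = 0x00000000ffff0000ULL;	/* hypothetical RES0 bits */
	uint64_t res1 = 0x0000000000000003ULL;	/* hypothetical RES1 bits */

	printf("stored value: %#018llx\n",
	       (unsigned long long)apply_masks(guest_write, res0, res1));
	return 0;
}
```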
da6f1666 | 1141 | u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg); |
d47533da | 1142 | void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); |
8d404c4c | 1143 | |
21c81001 MZ |
1144 | static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val) |
1145 | { | |
1146 | /* | |
1147 | * *** VHE ONLY *** | |
1148 | * | |
1149 | * System registers listed in the switch are not saved on every | |
1150 | * exit from the guest but are only saved on vcpu_put. | |
1151 | * | |
1152 | * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but | |
1153 | * should never be listed below, because the guest cannot modify its | |
1154 | * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's | |
1155 | * thread when emulating cross-VCPU communication. | |
1156 | */ | |
1157 | if (!has_vhe()) | |
1158 | return false; | |
1159 | ||
1160 | switch (reg) { | |
21c81001 MZ |
1161 | case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break; |
1162 | case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break; | |
1163 | case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break; | |
1164 | case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break; | |
1165 | case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break; | |
14ca930d MZ |
1166 | case TCR2_EL1: *val = read_sysreg_s(SYS_TCR2_EL12); break; |
1167 | case PIR_EL1: *val = read_sysreg_s(SYS_PIR_EL12); break; | |
1168 | case PIRE0_EL1: *val = read_sysreg_s(SYS_PIRE0_EL12); break; | |
5970e990 | 1169 | case POR_EL1: *val = read_sysreg_s(SYS_POR_EL12); break; |
21c81001 MZ |
1170 | case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break; |
1171 | case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break; | |
1172 | case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break; | |
1173 | case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break; | |
1174 | case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break; | |
1175 | case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break; | |
1176 | case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break; | |
1177 | case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break; | |
1178 | case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break; | |
1179 | case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break; | |
1180 | case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break; | |
1181 | case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break; | |
1182 | case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break; | |
fedc6123 | 1183 | case SPSR_EL1: *val = read_sysreg_s(SYS_SPSR_EL12); break; |
21c81001 MZ |
1184 | case PAR_EL1: *val = read_sysreg_par(); break; |
1185 | case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break; | |
1186 | case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break; | |
1187 | case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break; | |
b3d29a82 | 1188 | case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break; |
21c81001 MZ |
1189 | default: return false; |
1190 | } | |
1191 | ||
1192 | return true; | |
1193 | } | |
1194 | ||
1195 | static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg) | |
1196 | { | |
1197 | /* | |
1198 | * *** VHE ONLY *** | |
1199 | * | |
1200 | * System registers listed in the switch are not restored on every | |
1201 | * entry to the guest but are only restored on vcpu_load. | |
1202 | * | |
1203 | * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but | |
1204 | * should never be listed below, because the MPIDR should only be set | |
1205 | * once, before running the VCPU, and never changed later. | |
1206 | */ | |
1207 | if (!has_vhe()) | |
1208 | return false; | |
1209 | ||
1210 | switch (reg) { | |
21c81001 MZ |
1211 | case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break; |
1212 | case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break; | |
1213 | case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break; | |
1214 | case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break; | |
1215 | case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break; | |
14ca930d MZ |
1216 | case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break; |
1217 | case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break; | |
1218 | case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break; | |
5970e990 | 1219 | case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break; |
21c81001 MZ |
1220 | case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break; |
1221 | case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break; | |
1222 | case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break; | |
1223 | case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break; | |
1224 | case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break; | |
1225 | case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break; | |
1226 | case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break; | |
1227 | case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break; | |
1228 | case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break; | |
1229 | case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break; | |
1230 | case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break; | |
1231 | case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break; | |
1232 | case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break; | |
fedc6123 | 1233 | case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break; |
21c81001 MZ |
1234 | case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break; |
1235 | case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break; | |
1236 | case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break; | |
1237 | case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break; | |
b3d29a82 | 1238 | case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break; |
21c81001 MZ |
1239 | default: return false; |
1240 | } | |
1241 | ||
1242 | return true; | |
1243 | } | |
1244 | ||
4f8d6632 | 1245 | struct kvm_vm_stat { |
0193cc90 | 1246 | struct kvm_vm_stat_generic generic; |
4f8d6632 MZ |
1247 | }; |
1248 | ||
1249 | struct kvm_vcpu_stat { | |
0193cc90 | 1250 | struct kvm_vcpu_stat_generic generic; |
8a7e75d4 | 1251 | u64 hvc_exit_stat; |
b19e6892 AT |
1252 | u64 wfe_exit_stat; |
1253 | u64 wfi_exit_stat; | |
1254 | u64 mmio_exit_user; | |
1255 | u64 mmio_exit_kernel; | |
fe5161d2 | 1256 | u64 signal_exits; |
b19e6892 | 1257 | u64 exits; |
4f8d6632 MZ |
1258 | }; |
1259 | ||
4f8d6632 MZ |
1260 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
1261 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | |
4f8d6632 MZ |
1262 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
1263 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | |
6ac4a5ac MZ |
1264 | |
1265 | unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu); | |
1266 | int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); | |
6ac4a5ac | 1267 | |
539aee0e JM |
1268 | int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, |
1269 | struct kvm_vcpu_events *events); | |
b7b27fac | 1270 | |
539aee0e JM |
1271 | int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, |
1272 | struct kvm_vcpu_events *events); | |
4f8d6632 | 1273 | |
b13216cf CD |
1274 | void kvm_arm_halt_guest(struct kvm *kvm); |
1275 | void kvm_arm_resume_guest(struct kvm *kvm); | |
4f8d6632 | 1276 | |
3e7f4318 | 1277 | #define vcpu_has_run_once(vcpu) (!!READ_ONCE((vcpu)->pid)) |
cc5705fb | 1278 | |
40a50853 | 1279 | #ifndef __KVM_NVHE_HYPERVISOR__ |
05469831 | 1280 | #define kvm_call_hyp_nvhe(f, ...) \ |
f50b6f6a | 1281 | ({ \ |
05469831 AS |
1282 | struct arm_smccc_res res; \ |
1283 | \ | |
1284 | arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), \ | |
1285 | ##__VA_ARGS__, &res); \ | |
1286 | WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \ | |
1287 | \ | |
1288 | res.a1; \ | |
f50b6f6a AS |
1289 | }) |
1290 | ||
18fc7bf8 | 1291 | /* |
04c5355b MR |
1292 | * The isb() below is there to guarantee the same behaviour on VHE as on !VHE, |
1293 | * where the eret to EL1 acts as a context synchronization event. | |
18fc7bf8 MZ |
1294 | */ |
1295 | #define kvm_call_hyp(f, ...) \ | |
1296 | do { \ | |
1297 | if (has_vhe()) { \ | |
1298 | f(__VA_ARGS__); \ | |
1299 | isb(); \ | |
1300 | } else { \ | |
f50b6f6a | 1301 | kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \ |
18fc7bf8 MZ |
1302 | } \ |
1303 | } while(0) | |
1304 | ||
1305 | #define kvm_call_hyp_ret(f, ...) \ | |
1306 | ({ \ | |
1307 | typeof(f(__VA_ARGS__)) ret; \ | |
1308 | \ | |
1309 | if (has_vhe()) { \ | |
1310 | ret = f(__VA_ARGS__); \ | |
18fc7bf8 | 1311 | } else { \ |
05469831 | 1312 | ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \ |
18fc7bf8 MZ |
1313 | } \ |
1314 | \ | |
1315 | ret; \ | |
1316 | }) | |
40a50853 QP |
1317 | #else /* __KVM_NVHE_HYPERVISOR__ */ |
1318 | #define kvm_call_hyp(f, ...) f(__VA_ARGS__) | |
1319 | #define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__) | |
1320 | #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__) | |
1321 | #endif /* __KVM_NVHE_HYPERVISOR__ */ | |
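A toy model of the split above: with VHE the "hypervisor" function is an ordinary call in the same address space, while the nVHE path marshals the call through an HVC-style dispatcher keyed by function id. The dispatch table, hvc_dispatch() and has_vhe_mode flag below are illustrative, not kernel APIs.

```c
#include <stdbool.h>
#include <stdio.h>

static long flush_ctx(long arg) { return printf("flush ctx %ld\n", arg); }

/* stand-in for the nVHE host handler: look the target up by function id */
enum { ID_flush_ctx };
static long (*const dispatch[])(long) = { [ID_flush_ctx] = flush_ctx };
static long hvc_dispatch(int id, long arg) { return dispatch[id](arg); }

static bool has_vhe_mode = true;

#define kvm_call_hyp_ret(f, arg) \
	(has_vhe_mode ? f(arg) : hvc_dispatch(ID_##f, arg))

int main(void)
{
	kvm_call_hyp_ret(flush_ctx, 1);		/* direct call ("VHE") */
	has_vhe_mode = false;
	kvm_call_hyp_ret(flush_ctx, 2);		/* via dispatcher ("nVHE") */
	return 0;
}
```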
22b39ca3 | 1322 | |
74cc7e0c TZ |
1323 | int handle_exit(struct kvm_vcpu *vcpu, int exception_index); |
1324 | void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index); | |
4f8d6632 | 1325 | |
6ac4a5ac MZ |
1326 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu); |
1327 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu); | |
1328 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu); | |
1329 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu); | |
1330 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu); | |
1331 | int kvm_handle_sys_reg(struct kvm_vcpu *vcpu); | |
9369bc5c | 1332 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu); |
6ac4a5ac | 1333 | |
5c1ebe9a | 1334 | void kvm_sys_regs_create_debugfs(struct kvm *kvm); |
6ac4a5ac MZ |
1335 | void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); |
1336 | ||
8d20bd63 | 1337 | int __init kvm_sys_reg_table_init(void); |
19f3e7ea MZ |
1338 | struct sys_reg_desc; |
1339 | int __init populate_sysreg_config(const struct sys_reg_desc *sr, | |
1340 | unsigned int idx); | |
e58ec47b | 1341 | int __init populate_nv_trap_config(void); |
6ac4a5ac | 1342 | |
f1ff3fc5 | 1343 | void kvm_calculate_traps(struct kvm_vcpu *vcpu); |
c5bac1ef | 1344 | |
0e20f5e2 MZ |
1345 | /* MMIO helpers */ |
1346 | void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); | |
1347 | unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); | |
1348 | ||
74cc7e0c TZ |
1349 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu); |
1350 | int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa); | |
0e20f5e2 | 1351 | |
e1bfc245 SC |
1352 | /* |
1353 | * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, | |
1354 | * arrived in guest context. For arm64, any event that arrives while a vCPU is | |
1355 | * loaded is considered to be "in guest". | |
1356 | */ | |
1357 | static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) | |
1358 | { | |
1359 | return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; | |
1360 | } | |
1361 | ||
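/*
 * Illustrative use (a sketch, not an actual kernel call site): the perf
 * interrupt path can attribute a sample to the guest whenever a vCPU is
 * currently loaded on this CPU; kvm_get_running_vcpu() is the usual way to
 * obtain that vCPU.
 *
 *	if (kvm_arch_pmi_in_guest(kvm_get_running_vcpu()))
 *		;	// account the event against the guest
 */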
b48c1a45 | 1362 | long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu); |
8564d637 SP |
1363 | gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu); |
1364 | void kvm_update_stolen_time(struct kvm_vcpu *vcpu); | |
1365 | ||
004a0124 | 1366 | bool kvm_arm_pvtime_supported(void); |
58772e9a SP |
1367 | int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu, |
1368 | struct kvm_device_attr *attr); | |
1369 | int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu, | |
1370 | struct kvm_device_attr *attr); | |
1371 | int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, | |
1372 | struct kvm_device_attr *attr); | |
1373 | ||
8d20bd63 SC |
1374 | extern unsigned int __ro_after_init kvm_arm_vmid_bits; |
1375 | int __init kvm_arm_vmid_alloc_init(void); | |
1376 | void __init kvm_arm_vmid_alloc_free(void); | |
fa808ed4 | 1377 | void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); |
100b4f09 | 1378 | void kvm_arm_vmid_clear_active(void); |
41783839 | 1379 | |
8564d637 SP |
1380 | static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) |
1381 | { | |
cecafc0a | 1382 | vcpu_arch->steal.base = INVALID_GPA; |
8564d637 SP |
1383 | } |
1384 | ||
1385 | static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch) | |
1386 | { | |
cecafc0a | 1387 | return (vcpu_arch->steal.base != INVALID_GPA); |
8564d637 | 1388 | } |
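/*
 * Sketch of the pvtime lifecycle implied by the helpers above: the base is
 * initialised to INVALID_GPA at vCPU creation, userspace then sets a
 * stolen-time IPA through the vCPU device attribute interface, after which
 * kvm_arm_is_pvtime_enabled() returns true and stolen time gets published.
 *
 *	kvm_arm_pvtime_vcpu_init(&vcpu->arch);		// disabled by default
 *	...
 *	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
 *		kvm_update_stolen_time(vcpu);
 */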
b48c1a45 | 1389 | |
b7b27fac DG |
1390 | void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); |
1391 | ||
4429fc64 AP |
1392 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); |
1393 | ||
14ef9d04 | 1394 | DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data); |
4464e210 | 1395 | |
87f842c6 MZ |
1396 | /* |
1397 | * How we access per-CPU host data depends on where we access it from,
1398 | * and the mode we're in: | |
1399 | * | |
1400 | * - VHE and nVHE hypervisor bits use their locally defined instance | |
1401 | * | |
1402 | * - the rest of the kernel uses either the VHE or nVHE one, depending on
1403 | * the mode we're running in. | |
1404 | * | |
1405 | * Unless we're in protected mode, where the host is fully deprivileged and
1406 | * the nVHE per-CPU data is exclusively accessible to the protected EL2 code.
1407 | * In this case, the EL1 code uses the *VHE* data as its private state
1408 | * (which makes sense, in a way, as there shouldn't be any shared state
1409 | * between the host and the hypervisor).
1410 | * | |
1411 | * Yes, this is all totally trivial. Shoot me now. | |
1412 | */ | |
1413 | #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__) | |
1414 | #define host_data_ptr(f) (&this_cpu_ptr(&kvm_host_data)->f) | |
1415 | #else | |
1416 | #define host_data_ptr(f) \ | |
1417 | (static_branch_unlikely(&kvm_protected_mode_initialized) ? \ | |
1418 | &this_cpu_ptr(&kvm_host_data)->f : \ | |
1419 | &this_cpu_ptr_hyp_sym(kvm_host_data)->f) | |
1420 | #endif | |
1421 | ||
38131c02 OU |
1422 | #define host_data_test_flag(flag) \ |
1423 | (test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))) | |
1424 | #define host_data_set_flag(flag) \ | |
1425 | set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)) | |
1426 | #define host_data_clear_flag(flag) \ | |
1427 | clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)) | |
1428 | ||
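/*
 * Usage sketch for the accessors above; the flag name is hypothetical, real
 * ones come from the KVM_HOST_DATA_FLAG_* definitions. host_data_ptr()
 * resolves to the right per-CPU instance for the context it is compiled in,
 * so callers never have to pick between the host and hyp copies by hand.
 *
 *	if (*host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED)
 *		host_data_set_flag(EXAMPLE_FLAG);
 */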
b5b85bd7 FT |
1429 | /* Check whether the FP regs are owned by the guest */ |
1430 | static inline bool guest_owns_fp_regs(void) | |
1431 | { | |
1432 | return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED; | |
1433 | } | |
1434 | ||
f11290e0 FT |
1435 | /* Check whether the FP regs are owned by the host */ |
1436 | static inline bool host_owns_fp_regs(void) | |
1437 | { | |
1438 | return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED; | |
1439 | } | |
1440 | ||
1e0cf16c | 1441 | static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) |
32f13955 MZ |
1442 | { |
1443 | /* The host's MPIDR is immutable, so let's set it up at boot time */ | |
71071acf | 1444 | ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr(); |
32f13955 MZ |
1445 | } |
1446 | ||
5bdf3437 JM |
1447 | static inline bool kvm_system_needs_idmapped_vectors(void) |
1448 | { | |
d8569fba | 1449 | return cpus_have_final_cap(ARM64_SPECTRE_V3A); |
5bdf3437 JM |
1450 | } |
1451 | ||
2417218f | 1452 | void kvm_init_host_debug_data(void); |
cd9b1010 | 1453 | void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu); |
2ca3f03b | 1454 | void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu); |
cd9b1010 | 1455 | void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu); |
06d22a9c | 1456 | void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val); |
7dabf02f OU |
1457 | |
1458 | #define kvm_vcpu_os_lock_enabled(vcpu) \ | |
187de7c2 | 1459 | (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK)) |
7dabf02f | 1460 | |
58db67e9 OU |
1461 | #define kvm_debug_regs_in_use(vcpu) \ |
1462 | ((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE) | |
cd9b1010 OU |
1463 | #define kvm_host_owns_debug_regs(vcpu) \ |
1464 | ((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED) | |
beb470d9 OU |
1465 | #define kvm_guest_owns_debug_regs(vcpu) \ |
1466 | ((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED) | |
cd9b1010 | 1467 | |
bb0c70bc SZ |
1468 | int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, |
1469 | struct kvm_device_attr *attr); | |
1470 | int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, | |
1471 | struct kvm_device_attr *attr); | |
1472 | int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, | |
1473 | struct kvm_device_attr *attr); | |
56c7f5e7 | 1474 | |
2def950c TH |
1475 | int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, |
1476 | struct kvm_arm_copy_mte_tags *copy_tags); | |
30ec7997 MZ |
1477 | int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm, |
1478 | struct kvm_arm_counter_offset *offset); | |
3f9cd0ca JZ |
1479 | int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, |
1480 | struct reg_mask_range *range); | |
f0376edb | 1481 | |
e6b673b7 DM |
1482 | /* Guest/host FPSIMD coordination helpers */ |
1483 | int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); | |
1484 | void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); | |
af9a0e21 | 1485 | void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu); |
e6b673b7 DM |
1486 | void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); |
1487 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); | |
1488 | ||
eb41238c AM |
1489 | static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) |
1490 | { | |
435e53fb | 1491 | return (!has_vhe() && attr->exclude_host); |
eb41238c AM |
1492 | } |
1493 | ||
052f064d | 1494 | #ifdef CONFIG_KVM |
a4a6e207 RHA |
1495 | void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr); |
1496 | void kvm_clr_pmu_events(u64 clr); | |
0c2f9acf | 1497 | bool kvm_set_pmuserenr(u64 val); |
a665e3bc JC |
1498 | void kvm_enable_trbe(void); |
1499 | void kvm_disable_trbe(void); | |
054b8839 | 1500 | void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest); |
eb41238c | 1501 | #else |
a4a6e207 RHA |
1502 | static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {} |
1503 | static inline void kvm_clr_pmu_events(u64 clr) {} | |
0c2f9acf RW |
1504 | static inline bool kvm_set_pmuserenr(u64 val) |
1505 | { | |
1506 | return false; | |
1507 | } | |
a665e3bc JC |
1508 | static inline void kvm_enable_trbe(void) {} |
1509 | static inline void kvm_disable_trbe(void) {} | |
054b8839 | 1510 | static inline void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {} |
e6b673b7 | 1511 | #endif |
17eed27b | 1512 | |
27cde4c0 OU |
1513 | void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu); |
1514 | void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu); | |
bc192cee | 1515 | |
8d20bd63 | 1516 | int __init kvm_set_ipa_limit(void); |
4f128f8e | 1517 | u32 kvm_get_pa_bits(struct kvm *kvm); |
0f62f0e9 | 1518 | |
d1e5b0e9 MO |
1519 | #define __KVM_HAVE_ARCH_VM_ALLOC |
1520 | struct kvm *kvm_arch_alloc_vm(void); | |
d1e5b0e9 | 1521 | |
32121c81 RRA |
1522 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS |
1523 | ||
c42b6f0b RRA |
1524 | #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE |
1525 | ||
b6ed4fa9 FT |
1526 | #define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled) |
1527 | ||
1528 | #define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm) | |
2ea7f655 | 1529 | |
92e68b2b | 1530 | int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); |
9033bba4 DM |
1531 | bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); |
1532 | ||
4c0680d3 | 1533 | #define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED) |
7dd32a0d | 1534 | |
06394531 MZ |
1535 | #define kvm_has_mte(kvm) \ |
1536 | (system_supports_mte() && \ | |
1537 | test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags)) | |
14bda7a9 | 1538 | |
f3c6efc7 OU |
1539 | #define kvm_supports_32bit_el0() \ |
1540 | (system_supports_32bit_el0() && \ | |
1541 | !static_branch_unlikely(&arm64_mismatched_32bit_el0)) | |
1542 | ||
de40bb8a OU |
1543 | #define kvm_vm_has_ran_once(kvm) \ |
1544 | (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags)) | |
1545 | ||
111903d1 MZ |
1546 | static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature) |
1547 | { | |
1548 | return test_bit(feature, ka->vcpu_features); | |
1549 | } | |
1550 | ||
3d7ff007 | 1551 | #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f)) |
111903d1 MZ |
1552 | #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f)) |
1553 | ||
e0163337 OU |
1554 | #define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED) |
1555 | ||
a8e190cd | 1556 | int kvm_trng_call(struct kvm_vcpu *vcpu); |
f320bc74 QP |
1557 | #ifdef CONFIG_KVM |
1558 | extern phys_addr_t hyp_mem_base; | |
1559 | extern phys_addr_t hyp_mem_size; | |
1560 | void __init kvm_hyp_reserve(void); | |
1561 | #else | |
1562 | static inline void kvm_hyp_reserve(void) { } | |
1563 | #endif | |
a8e190cd | 1564 | |
1e579429 | 1565 | void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu); |
b171f9bb | 1566 | bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); |
1e579429 | 1567 | |
97ca3fcc OU |
1568 | static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg) |
1569 | { | |
1570 | switch (reg) { | |
1571 | case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7): | |
1572 | return &ka->id_regs[IDREG_IDX(reg)]; | |
2843cae2 SO |
1573 | case SYS_CTR_EL0: |
1574 | return &ka->ctr_el0; | |
b4043e7c SO |
1575 | case SYS_MIDR_EL1: |
1576 | return &ka->midr_el1; | |
1577 | case SYS_REVIDR_EL1: | |
1578 | return &ka->revidr_el1; | |
1579 | case SYS_AIDR_EL1: | |
1580 | return &ka->aidr_el1; | |
97ca3fcc OU |
1581 | default: |
1582 | WARN_ON_ONCE(1); | |
1583 | return NULL; | |
1584 | } | |
1585 | } | |
1586 | ||
1587 | #define kvm_read_vm_id_reg(kvm, reg) \ | |
1588 | ({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; }) | |
1589 | ||
d7508d27 OU |
1590 | void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val); |
1591 | ||
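/*
 * Usage sketch: ID registers are stored per-VM, so sysreg emulation and
 * feature checks read them through the same accessor, e.g.
 *
 *	u64 pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
 *
 * Writable ID registers are updated with kvm_set_vm_id_reg().
 */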
c62d7a23 MZ |
1592 | #define __expand_field_sign_unsigned(id, fld, val) \ |
1593 | ((u64)SYS_FIELD_VALUE(id, fld, val)) | |
1594 | ||
1595 | #define __expand_field_sign_signed(id, fld, val) \ | |
1596 | ({ \ | |
1597 | u64 __val = SYS_FIELD_VALUE(id, fld, val); \ | |
1598 | sign_extend64(__val, id##_##fld##_WIDTH - 1); \ | |
1599 | }) | |
1600 | ||
c62d7a23 MZ |
1601 | #define get_idreg_field_unsigned(kvm, id, fld) \ |
1602 | ({ \ | |
97ca3fcc | 1603 | u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id); \ |
c62d7a23 MZ |
1604 | FIELD_GET(id##_##fld##_MASK, __val); \ |
1605 | }) | |
1606 | ||
1607 | #define get_idreg_field_signed(kvm, id, fld) \ | |
1608 | ({ \ | |
1609 | u64 __val = get_idreg_field_unsigned(kvm, id, fld); \ | |
1610 | sign_extend64(__val, id##_##fld##_WIDTH - 1); \ | |
1611 | }) | |
1612 | ||
1613 | #define get_idreg_field_enum(kvm, id, fld) \ | |
1614 | get_idreg_field_unsigned(kvm, id, fld) | |
1615 | ||
a1d402ab MZ |
1616 | #define kvm_cmp_feat_signed(kvm, id, fld, op, limit) \ |
1617 | (get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit)) | |
1618 | ||
1619 | #define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit) \ | |
1620 | (get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit)) | |
1621 | ||
1622 | #define kvm_cmp_feat(kvm, id, fld, op, limit) \ | |
c62d7a23 | 1623 | (id##_##fld##_SIGNED ? \ |
a1d402ab MZ |
1624 | kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \ |
1625 | kvm_cmp_feat_unsigned(kvm, id, fld, op, limit)) | |
c62d7a23 | 1626 | |
a764b56b | 1627 | #define __kvm_has_feat(kvm, id, fld, limit) \ |
a1d402ab | 1628 | kvm_cmp_feat(kvm, id, fld, >=, limit) |
c62d7a23 | 1629 | |
a764b56b MZ |
1630 | #define kvm_has_feat(kvm, ...) __kvm_has_feat(kvm, __VA_ARGS__) |
1631 | ||
1632 | #define __kvm_has_feat_enum(kvm, id, fld, val) \ | |
a1d402ab | 1633 | kvm_cmp_feat_unsigned(kvm, id, fld, ==, val) |
c62d7a23 | 1634 | |
a764b56b MZ |
1635 | #define kvm_has_feat_enum(kvm, ...) __kvm_has_feat_enum(kvm, __VA_ARGS__) |
1636 | ||
c62d7a23 | 1637 | #define kvm_has_feat_range(kvm, id, fld, min, max) \ |
a1d402ab MZ |
1638 | (kvm_cmp_feat(kvm, id, fld, >=, min) && \ |
1639 | kvm_cmp_feat(kvm, id, fld, <=, max)) | |
c62d7a23 | 1640 | |
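/*
 * Usage sketch for the feature-check macros above. Register, field and value
 * names come from the generated sysreg definitions, and field signedness is
 * resolved automatically by kvm_cmp_feat():
 *
 *	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
 *		;	// the VM's ID registers advertise EL2
 *
 * kvm_has_feat_enum() is the equality variant for enumerated (non-ordered)
 * fields, and kvm_has_feat_range() bounds the field between two values.
 */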
719f5206 MZ |
1641 | /* Check for a given level of PAuth support */ |
1642 | #define kvm_has_pauth(k, l) \ | |
1643 | ({ \ | |
1644 | bool pa, pi, pa3; \ | |
1645 | \ | |
1646 | pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l); \ | |
1647 | pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP); \ | |
1648 | pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l); \ | |
1649 | pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP); \ | |
1650 | pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l); \ | |
1651 | pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP); \ | |
1652 | \ | |
1653 | (pa + pi + pa3) == 1; \ | |
1654 | }) | |
1655 | ||
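/*
 * Usage sketch: the level is one of the architected APA/API/APA3 field
 * values (e.g. PAuth or PAuth2), so a caller checking for basic pointer
 * authentication support would do:
 *
 *	if (kvm_has_pauth(kvm, PAuth))
 *		;	// address (and generic) authentication is present
 */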
d4db9879 MZ |
1656 | #define kvm_has_fpmr(k) \ |
1657 | (system_supports_fpmr() && \ | |
1658 | kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP)) | |
1659 | ||
0fcb4eea MB |
1660 | #define kvm_has_tcr2(k) \ |
1661 | (kvm_has_feat((k), ID_AA64MMFR3_EL1, TCRX, IMP)) | |
1662 | ||
a68cddbe MB |
1663 | #define kvm_has_s1pie(k) \ |
1664 | (kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP)) | |
1665 | ||
26e89dcc MZ |
1666 | #define kvm_has_s1poe(k) \ |
1667 | (kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP)) | |
1668 | ||
5f9e1698 PB |
1669 | static inline bool kvm_arch_has_irq_bypass(void) |
1670 | { | |
1671 | return true; | |
1672 | } | |
1673 | ||
63d423a7 | 1674 | void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt); |
c6cbe6a4 | 1675 | void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1); |
938a79d0 | 1676 | void check_feature_map(void); |
63d423a7 | 1677 | |
fef3acf5 | 1678 | |
4f8d6632 | 1679 | #endif /* __ARM64_KVM_HOST_H__ */ |