Commit | Line | Data |
---|---|---|
caab277b | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
4f8d6632 MZ |
2 | /* |
3 | * Copyright (C) 2012,2013 - ARM Ltd | |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
5 | * | |
6 | * Derived from arch/arm/include/asm/kvm_host.h: | |
7 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
8 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4f8d6632 MZ |
9 | */ |
10 | ||
11 | #ifndef __ARM64_KVM_HOST_H__ | |
12 | #define __ARM64_KVM_HOST_H__ | |
13 | ||
05469831 | 14 | #include <linux/arm-smccc.h> |
3f61f409 | 15 | #include <linux/bitmap.h> |
65647300 | 16 | #include <linux/types.h> |
3f61f409 | 17 | #include <linux/jump_label.h> |
65647300 | 18 | #include <linux/kvm_types.h> |
3f61f409 | 19 | #include <linux/percpu.h> |
ff367fe4 | 20 | #include <linux/psci.h> |
85738e05 | 21 | #include <asm/arch_gicv3.h> |
3f61f409 | 22 | #include <asm/barrier.h> |
63a1e1c9 | 23 | #include <asm/cpufeature.h> |
1e0cf16c | 24 | #include <asm/cputype.h> |
4f5abad9 | 25 | #include <asm/daifflags.h> |
17eed27b | 26 | #include <asm/fpsimd.h> |
4f8d6632 | 27 | #include <asm/kvm.h> |
3a3604bc | 28 | #include <asm/kvm_asm.h> |
4f8d6632 | 29 | |
c1426e4c EA |
30 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED |
31 | ||
920552b2 | 32 | #define KVM_HALT_POLL_NS_DEFAULT 500000 |
4f8d6632 MZ |
33 | |
34 | #include <kvm/arm_vgic.h> | |
35 | #include <kvm/arm_arch_timer.h> | |
04fe4726 | 36 | #include <kvm/arm_pmu.h> |
4f8d6632 | 37 | |
ef748917 ML |
38 | #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS |
39 | ||
a22fa321 | 40 | #define KVM_VCPU_MAX_FEATURES 7 |
4f8d6632 | 41 | |
7b244e2b | 42 | #define KVM_REQ_SLEEP \ |
2387149e | 43 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
325f9c64 | 44 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
358b28f0 | 45 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) |
8564d637 | 46 | #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) |
d9c3872c | 47 | #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) |
d0c94c49 | 48 | #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5) |
7b33a09d | 49 | #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6) |
b13216cf | 50 | |
c862626e KZ |
51 | #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ |
52 | KVM_DIRTY_LOG_INITIALLY_SET) | |
53 | ||
fcc5bf89 JZ |
54 | #define KVM_HAVE_MMU_RWLOCK |
55 | ||
d8b369c4 DB |
56 | /* |
57 | * Mode of operation configurable with kvm-arm.mode early param. | |
58 | * See Documentation/admin-guide/kernel-parameters.txt for more information. | |
59 | */ | |
60 | enum kvm_mode { | |
61 | KVM_MODE_DEFAULT, | |
62 | KVM_MODE_PROTECTED, | |
b6a68b97 | 63 | KVM_MODE_NONE, |
d8b369c4 | 64 | }; |
3eb681fb | 65 | enum kvm_mode kvm_get_mode(void); |
d8b369c4 | 66 | |
61bbe380 CD |
67 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
68 | ||
9033bba4 | 69 | extern unsigned int kvm_sve_max_vl; |
a3be836d | 70 | int kvm_arm_init_sve(void); |
0f062bfe | 71 | |
6b7982fe | 72 | u32 __attribute_const__ kvm_target_cpu(void); |
4f8d6632 | 73 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
19bcc89e | 74 | void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); |
4f8d6632 | 75 | |
e329fb75 | 76 | struct kvm_vmid { |
3248136b | 77 | atomic64_t id; |
e329fb75 CD |
78 | }; |
79 | ||
a0e50aa3 | 80 | struct kvm_s2_mmu { |
e329fb75 | 81 | struct kvm_vmid vmid; |
4f8d6632 | 82 | |
a0e50aa3 CD |
83 | /* |
84 | * stage2 entry level table | |
85 | * | |
86 | * Two kvm_s2_mmu structures in the same VM can point to the same | |
87 | * pgd here. This happens when running a guest using a | |
88 | * translation regime that isn't affected by its own stage-2 | |
89 | * translation, such as a non-VHE hypervisor running at vEL2, or | |
90 | * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the | |
91 | * canonical stage-2 page tables. | |
92 | */ | |
a0e50aa3 | 93 | phys_addr_t pgd_phys; |
71233d05 | 94 | struct kvm_pgtable *pgt; |
4f8d6632 | 95 | |
94d0e598 MZ |
96 | /* The last vcpu id that ran on each physical CPU */ |
97 | int __percpu *last_vcpu_ran; | |
98 | ||
cfb1a98d | 99 | struct kvm_arch *arch; |
a0e50aa3 CD |
100 | }; |
101 | ||
8d14797b WD |
102 | struct kvm_arch_memory_slot { |
103 | }; | |
104 | ||
05714cab RRA |
105 | /** |
106 | * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests | |
107 | * | |
108 | * @std_bmap: Bitmap of standard secure service calls | |
428fd678 | 109 | * @std_hyp_bmap: Bitmap of standard hypervisor service calls |
b22216e1 | 110 | * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls |
05714cab RRA |
111 | */ |
112 | struct kvm_smccc_features { | |
113 | unsigned long std_bmap; | |
428fd678 | 114 | unsigned long std_hyp_bmap; |
b22216e1 | 115 | unsigned long vendor_hyp_bmap; |
05714cab RRA |
116 | }; |
117 | ||
a0e50aa3 CD |
118 | struct kvm_arch { |
119 | struct kvm_s2_mmu mmu; | |
120 | ||
121 | /* VTCR_EL2 value for this VM */ | |
122 | u64 vtcr; | |
123 | ||
4f8d6632 MZ |
124 | /* Interrupt controller */ |
125 | struct vgic_dist vgic; | |
85bd0ba1 MZ |
126 | |
127 | /* Mandated version of PSCI */ | |
128 | u32 psci_version; | |
c726200d CD |
129 | |
130 | /* | |
131 | * If we encounter a data abort without valid instruction syndrome | |
132 | * information, report this to user space. User space can (and | |
133 | * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is | |
134 | * supported. | |
135 | */ | |
06394531 MZ |
136 | #define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0 |
137 | /* Memory Tagging Extension enabled for the guest */ | |
138 | #define KVM_ARCH_FLAG_MTE_ENABLED 1 | |
139 | /* At least one vCPU has run in the VM */ | |
140 | #define KVM_ARCH_FLAG_HAS_RAN_ONCE 2 | |
26bf74bd RW |
141 | /* |
142 | * The following two bits are used to indicate the guest's EL1 | |
143 | * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT | |
144 | * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set. | |
145 | * Otherwise, the guest's EL1 register width has not yet been | |
146 | * determined. | |
147 | */ | |
148 | #define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3 | |
149 | #define KVM_ARCH_FLAG_EL1_32BIT 4 | |
bfbab445 OU |
150 | /* PSCI SYSTEM_SUSPEND enabled for the guest */ |
151 | #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5 | |
26bf74bd | 152 | |
06394531 | 153 | unsigned long flags; |
fd65a3b5 | 154 | |
d7eec236 MZ |
155 | /* |
156 | * VM-wide PMU filter, implemented as a bitmap and big enough for | |
157 | * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+). | |
158 | */ | |
159 | unsigned long *pmu_filter; | |
46b18782 | 160 | struct arm_pmu *arm_pmu; |
23711a5e | 161 | |
583cda1b | 162 | cpumask_var_t supported_cpus; |
23711a5e MZ |
163 | |
164 | u8 pfr0_csv2; | |
4f1df628 | 165 | u8 pfr0_csv3; |
05714cab RRA |
166 | |
167 | /* Hypercall features firmware registers' descriptor */ | |
168 | struct kvm_smccc_features smccc_feat; | |
4f8d6632 MZ |
169 | }; |
170 | ||
4f8d6632 | 171 | struct kvm_vcpu_fault_info { |
0b12620f | 172 | u64 esr_el2; /* Hyp Syndrome Register */ |
4f8d6632 MZ |
173 | u64 far_el2; /* Hyp Fault Address Register */ |
174 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ | |
0067df41 | 175 | u64 disr_el1; /* Deferred [SError] Status Register */ |
4f8d6632 MZ |
176 | }; |
177 | ||
9d8415d6 | 178 | enum vcpu_sysreg { |
8f7f4fe7 | 179 | __INVALID_SYSREG__, /* 0 is reserved as an invalid value */ |
9d8415d6 MZ |
180 | MPIDR_EL1, /* MultiProcessor Affinity Register */ |
181 | CSSELR_EL1, /* Cache Size Selection Register */ | |
182 | SCTLR_EL1, /* System Control Register */ | |
183 | ACTLR_EL1, /* Auxiliary Control Register */ | |
184 | CPACR_EL1, /* Coprocessor Access Control */ | |
73433762 | 185 | ZCR_EL1, /* SVE Control */ |
9d8415d6 MZ |
186 | TTBR0_EL1, /* Translation Table Base Register 0 */ |
187 | TTBR1_EL1, /* Translation Table Base Register 1 */ | |
188 | TCR_EL1, /* Translation Control Register */ | |
189 | ESR_EL1, /* Exception Syndrome Register */ | |
ef769e32 AB |
190 | AFSR0_EL1, /* Auxiliary Fault Status Register 0 */ |
191 | AFSR1_EL1, /* Auxiliary Fault Status Register 1 */ | |
9d8415d6 MZ |
192 | FAR_EL1, /* Fault Address Register */ |
193 | MAIR_EL1, /* Memory Attribute Indirection Register */ | |
194 | VBAR_EL1, /* Vector Base Address Register */ | |
195 | CONTEXTIDR_EL1, /* Context ID Register */ | |
196 | TPIDR_EL0, /* Thread ID, User R/W */ | |
197 | TPIDRRO_EL0, /* Thread ID, User R/O */ | |
198 | TPIDR_EL1, /* Thread ID, Privileged */ | |
199 | AMAIR_EL1, /* Aux Memory Attribute Indirection Register */ | |
200 | CNTKCTL_EL1, /* Timer Control Register (EL1) */ | |
201 | PAR_EL1, /* Physical Address Register */ | |
202 | MDSCR_EL1, /* Monitor Debug System Control Register */ | |
203 | MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ | |
d42e2671 | 204 | OSLSR_EL1, /* OS Lock Status Register */ |
c773ae2b | 205 | DISR_EL1, /* Deferred Interrupt Status Register */ |
9d8415d6 | 206 | |
ab946834 SZ |
207 | /* Performance Monitors Registers */ |
208 | PMCR_EL0, /* Control Register */ | |
3965c3ce | 209 | PMSELR_EL0, /* Event Counter Selection Register */ |
051ff581 SZ |
210 | PMEVCNTR0_EL0, /* Event Counter Register (0-30) */ |
211 | PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30, | |
212 | PMCCNTR_EL0, /* Cycle Counter Register */ | |
9feb21ac SZ |
213 | PMEVTYPER0_EL0, /* Event Type Register (0-30) */ |
214 | PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30, | |
215 | PMCCFILTR_EL0, /* Cycle Count Filter Register */ | |
96b0eebc | 216 | PMCNTENSET_EL0, /* Count Enable Set Register */ |
9db52c78 | 217 | PMINTENSET_EL1, /* Interrupt Enable Set Register */ |
76d883c4 | 218 | PMOVSSET_EL0, /* Overflow Flag Status Set Register */ |
d692b8ad | 219 | PMUSERENR_EL0, /* User Enable Register */ |
ab946834 | 220 | |
384b40ca MR |
221 | /* Pointer Authentication Registers in a strict increasing order. */ |
222 | APIAKEYLO_EL1, | |
223 | APIAKEYHI_EL1, | |
224 | APIBKEYLO_EL1, | |
225 | APIBKEYHI_EL1, | |
226 | APDAKEYLO_EL1, | |
227 | APDAKEYHI_EL1, | |
228 | APDBKEYLO_EL1, | |
229 | APDBKEYHI_EL1, | |
230 | APGAKEYLO_EL1, | |
231 | APGAKEYHI_EL1, | |
232 | ||
98909e6d | 233 | ELR_EL1, |
1bded23e | 234 | SP_EL1, |
710f1982 | 235 | SPSR_EL1, |
98909e6d | 236 | |
41ce82f6 MZ |
237 | CNTVOFF_EL2, |
238 | CNTV_CVAL_EL0, | |
239 | CNTV_CTL_EL0, | |
240 | CNTP_CVAL_EL0, | |
241 | CNTP_CTL_EL0, | |
242 | ||
e1f358b5 SP |
243 | /* Memory Tagging Extension registers */ |
244 | RGSR_EL1, /* Random Allocation Tag Seed Register */ | |
245 | GCR_EL1, /* Tag Control Register */ | |
246 | TFSR_EL1, /* Tag Fault Status Register (EL1) */ | |
247 | TFSRE0_EL1, /* Tag Fault Status Register (EL0) */ | |
248 | ||
9d8415d6 MZ |
249 | /* 32bit specific registers. Keep them at the end of the range */ |
250 | DACR32_EL2, /* Domain Access Control Register */ | |
251 | IFSR32_EL2, /* Instruction Fault Status Register */ | |
252 | FPEXC32_EL2, /* Floating-Point Exception Control Register */ | |
253 | DBGVCR32_EL2, /* Debug Vector Catch Register */ | |
254 | ||
255 | NR_SYS_REGS /* Nothing after this line! */ | |
256 | }; | |
257 | ||
4f8d6632 | 258 | struct kvm_cpu_context { |
e47c2055 MZ |
259 | struct user_pt_regs regs; /* sp = sp_el0 */ |
260 | ||
fd85b667 MZ |
261 | u64 spsr_abt; |
262 | u64 spsr_und; | |
263 | u64 spsr_irq; | |
264 | u64 spsr_fiq; | |
e47c2055 MZ |
265 | |
266 | struct user_fpsimd_state fp_regs; | |
267 | ||
5f7e02ae | 268 | u64 sys_regs[NR_SYS_REGS]; |
c97e166e JM |
269 | |
270 | struct kvm_vcpu *__hyp_running_vcpu; | |
4f8d6632 MZ |
271 | }; |
272 | ||
630a1685 AM |
273 | struct kvm_host_data { |
274 | struct kvm_cpu_context host_ctxt; | |
275 | }; | |
276 | ||
ff367fe4 DB |
277 | struct kvm_host_psci_config { |
278 | /* PSCI version used by host. */ | |
279 | u32 version; | |
280 | ||
281 | /* Function IDs used by host if version is v0.1. */ | |
282 | struct psci_0_1_function_ids function_ids_0_1; | |
283 | ||
767c973f MZ |
284 | bool psci_0_1_cpu_suspend_implemented; |
285 | bool psci_0_1_cpu_on_implemented; | |
286 | bool psci_0_1_cpu_off_implemented; | |
287 | bool psci_0_1_migrate_implemented; | |
ff367fe4 DB |
288 | }; |
289 | ||
290 | extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config); | |
291 | #define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config) | |
292 | ||
61fe0c37 DB |
293 | extern s64 kvm_nvhe_sym(hyp_physvirt_offset); |
294 | #define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset) | |
295 | ||
296 | extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS]; | |
297 | #define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map) | |
298 | ||
358b28f0 MZ |
299 | struct vcpu_reset_state { |
300 | unsigned long pc; | |
301 | unsigned long r0; | |
302 | bool be; | |
303 | bool reset; | |
304 | }; | |
305 | ||
4f8d6632 MZ |
306 | struct kvm_vcpu_arch { |
307 | struct kvm_cpu_context ctxt; | |
0033cd93 MB |
308 | |
309 | /* Guest floating point state */ | |
b43b5dd9 DM |
310 | void *sve_state; |
311 | unsigned int sve_max_vl; | |
0033cd93 | 312 | u64 svcr; |
4f8d6632 | 313 | |
a0e50aa3 CD |
314 | /* Stage 2 paging state used by the hardware on next switch */ |
315 | struct kvm_s2_mmu *hw_mmu; | |
316 | ||
1460b4b2 | 317 | /* Values of trap registers for the guest. */ |
4f8d6632 | 318 | u64 hcr_el2; |
d6c850dd | 319 | u64 mdcr_el2; |
cd496228 | 320 | u64 cptr_el2; |
4f8d6632 | 321 | |
1460b4b2 FT |
322 | /* Values of trap registers for the host before guest entry. */ |
323 | u64 mdcr_el2_host; | |
4f8d6632 MZ |
324 | |
325 | /* Exception Information */ | |
326 | struct kvm_vcpu_fault_info fault; | |
327 | ||
fa89d31c DM |
328 | /* Miscellaneous vcpu state flags */ |
329 | u64 flags; | |
0c557ed4 | 330 | |
84e690bf AB |
331 | /* |
332 | * We maintain more than a single set of debug registers to support | |
333 | * debugging the guest from the host and to maintain separate host and | |
334 | * guest state during world switches. vcpu_debug_state are the debug | |
335 | * registers of the vcpu as the guest sees them. host_debug_state are | |
834bf887 AB |
336 | * the host registers which are saved and restored during |
337 | * world switches. external_debug_state contains the debug | |
338 | * values we want to debug the guest. This is set via the | |
339 | * KVM_SET_GUEST_DEBUG ioctl. | |
84e690bf AB |
340 | * |
341 | * debug_ptr points to the set of debug registers that should be loaded | |
342 | * onto the hardware when running the guest. | |
343 | */ | |
344 | struct kvm_guest_debug_arch *debug_ptr; | |
345 | struct kvm_guest_debug_arch vcpu_debug_state; | |
834bf887 | 346 | struct kvm_guest_debug_arch external_debug_state; |
84e690bf | 347 | |
e6b673b7 | 348 | struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ |
52b28657 | 349 | struct task_struct *parent_task; |
e6b673b7 | 350 | |
f85279b4 WD |
351 | struct { |
352 | /* {Break,watch}point registers */ | |
353 | struct kvm_guest_debug_arch regs; | |
354 | /* Statistical profiling extension */ | |
355 | u64 pmscr_el1; | |
a1319260 SP |
356 | /* Self-hosted trace */ |
357 | u64 trfcr_el1; | |
f85279b4 | 358 | } host_debug_state; |
4f8d6632 MZ |
359 | |
360 | /* VGIC state */ | |
361 | struct vgic_cpu vgic_cpu; | |
362 | struct arch_timer_cpu timer_cpu; | |
04fe4726 | 363 | struct kvm_pmu pmu; |
4f8d6632 | 364 | |
337b99bf AB |
365 | /* |
366 | * Guest registers we preserve during guest debugging. | |
367 | * | |
368 | * These shadow registers are updated by the kvm_handle_sys_reg | |
369 | * trap handler if the guest accesses or updates them while we | |
370 | * are using guest debug. | |
371 | */ | |
372 | struct { | |
373 | u32 mdscr_el1; | |
374 | } guest_debug_preserved; | |
375 | ||
b171f9bb OU |
376 | /* vcpu power state */ |
377 | struct kvm_mp_state mp_state; | |
4f8d6632 | 378 | |
3b92830a EA |
379 | /* Don't run the guest (internal implementation need) */ |
380 | bool pause; | |
381 | ||
4f8d6632 MZ |
382 | /* Cache some mmu pages needed inside spinlock regions */ |
383 | struct kvm_mmu_memory_cache mmu_page_cache; | |
384 | ||
385 | /* Target CPU and feature flags */ | |
6c8c0c4d | 386 | int target; |
4f8d6632 MZ |
387 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); |
388 | ||
4715c14b JM |
389 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ |
390 | u64 vsesr_el2; | |
d47533da | 391 | |
358b28f0 MZ |
392 | /* Additional reset state */ |
393 | struct vcpu_reset_state reset_state; | |
394 | ||
d47533da | 395 | /* True when deferrable sysregs are loaded on the physical CPU, |
13aeb9b4 | 396 | * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */ |
d47533da | 397 | bool sysregs_loaded_on_cpu; |
8564d637 SP |
398 | |
399 | /* Guest PV state */ | |
400 | struct { | |
8564d637 SP |
401 | u64 last_steal; |
402 | gpa_t base; | |
403 | } steal; | |
4f8d6632 MZ |
404 | }; |
405 | ||
b43b5dd9 | 406 | /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ |
985d3a1b MZ |
407 | #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \ |
408 | sve_ffr_offset((vcpu)->arch.sve_max_vl)) | |
b43b5dd9 | 409 | |
468f3477 | 410 | #define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl) |
b43b5dd9 | 411 | |
e1c9c983 DM |
412 | #define vcpu_sve_state_size(vcpu) ({ \ |
413 | size_t __size_ret; \ | |
414 | unsigned int __vcpu_vq; \ | |
415 | \ | |
416 | if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \ | |
417 | __size_ret = 0; \ | |
418 | } else { \ | |
468f3477 | 419 | __vcpu_vq = vcpu_sve_max_vq(vcpu); \ |
e1c9c983 DM |
420 | __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \ |
421 | } \ | |
422 | \ | |
423 | __size_ret; \ | |
424 | }) | |
425 | ||
fa89d31c DM |
426 | /* vcpu_arch flags field values: */ |
427 | #define KVM_ARM64_DEBUG_DIRTY (1 << 0) | |
e6b673b7 DM |
428 | #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ |
429 | #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ | |
b3eb56b6 | 430 | #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ |
1765edba | 431 | #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ |
9033bba4 | 432 | #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ |
b890d75c | 433 | #define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ |
e650b64f | 434 | #define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */ |
892fd259 MZ |
435 | /* |
436 | * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be | |
437 | * set together with an exception... | |
438 | */ | |
439 | #define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */ | |
e650b64f | 440 | #define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */ |
e650b64f MZ |
441 | /* |
442 | * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can | |
443 | * take the following values: | |
444 | * | |
445 | * For AArch32 EL1: | |
446 | */ | |
447 | #define KVM_ARM64_EXCEPT_AA32_UND (0 << 9) | |
448 | #define KVM_ARM64_EXCEPT_AA32_IABT (1 << 9) | |
449 | #define KVM_ARM64_EXCEPT_AA32_DABT (2 << 9) | |
450 | /* For AArch64: */ | |
451 | #define KVM_ARM64_EXCEPT_AA64_ELx_SYNC (0 << 9) | |
452 | #define KVM_ARM64_EXCEPT_AA64_ELx_IRQ (1 << 9) | |
453 | #define KVM_ARM64_EXCEPT_AA64_ELx_FIQ (2 << 9) | |
454 | #define KVM_ARM64_EXCEPT_AA64_ELx_SERR (3 << 9) | |
455 | #define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11) | |
456 | #define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11) | |
457 | ||
892fd259 MZ |
458 | #define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ |
459 | #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ | |
af9a0e21 | 460 | #define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14) |
583cda1b | 461 | #define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */ |
861262ab | 462 | #define KVM_ARM64_HOST_SME_ENABLED (1 << 16) /* SME enabled for EL0 */ |
b2c4caf3 | 463 | #define KVM_ARM64_WFIT (1 << 17) /* WFIT instruction trapped */ |
892fd259 MZ |
464 | |
465 | #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ | |
466 | KVM_GUESTDBG_USE_SW_BP | \ | |
467 | KVM_GUESTDBG_USE_HW | \ | |
468 | KVM_GUESTDBG_SINGLESTEP) | |
e650b64f MZ |
469 | |
470 | #define vcpu_has_sve(vcpu) (system_supports_sve() && \ | |
1765edba | 471 | ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) |
fa89d31c | 472 | |
bf4086b1 MZ |
473 | #ifdef CONFIG_ARM64_PTR_AUTH |
474 | #define vcpu_has_ptrauth(vcpu) \ | |
475 | ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ | |
476 | cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ | |
477 | (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH) | |
478 | #else | |
479 | #define vcpu_has_ptrauth(vcpu) false | |
480 | #endif | |
b890d75c | 481 | |
583cda1b AE |
482 | #define vcpu_on_unsupported_cpu(vcpu) \ |
483 | ((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU) | |
484 | ||
485 | #define vcpu_set_on_unsupported_cpu(vcpu) \ | |
486 | ((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU) | |
487 | ||
488 | #define vcpu_clear_on_unsupported_cpu(vcpu) \ | |
489 | ((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU) | |
490 | ||
e47c2055 | 491 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs) |
8d404c4c CD |
492 | |
493 | /* | |
1b422dd7 MZ |
494 | * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the |
495 | * memory backed version of a register, and not the one most recently | |
496 | * accessed by a running VCPU. For example, for userspace access or | |
497 | * for system registers that are never context switched, but only | |
498 | * emulated. | |
8d404c4c | 499 | */ |
1b422dd7 MZ |
500 | #define __ctxt_sys_reg(c,r) (&(c)->sys_regs[(r)]) |
501 | ||
502 | #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r)) | |
503 | ||
504 | #define __vcpu_sys_reg(v,r) (ctxt_sys_reg(&(v)->arch.ctxt, (r))) | |
8d404c4c | 505 | |
da6f1666 | 506 | u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg); |
d47533da | 507 | void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); |
8d404c4c | 508 | |
21c81001 MZ |
509 | static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val) |
510 | { | |
511 | /* | |
512 | * *** VHE ONLY *** | |
513 | * | |
514 | * System registers listed in the switch are not saved on every | |
515 | * exit from the guest but are only saved on vcpu_put. | |
516 | * | |
517 | * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but | |
518 | * should never be listed below, because the guest cannot modify its | |
519 | * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's | |
520 | * thread when emulating cross-VCPU communication. | |
521 | */ | |
522 | if (!has_vhe()) | |
523 | return false; | |
524 | ||
525 | switch (reg) { | |
526 | case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break; | |
527 | case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break; | |
528 | case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break; | |
529 | case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break; | |
530 | case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break; | |
531 | case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break; | |
532 | case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break; | |
533 | case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break; | |
534 | case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break; | |
535 | case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break; | |
536 | case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break; | |
537 | case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break; | |
538 | case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break; | |
539 | case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break; | |
540 | case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break; | |
541 | case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break; | |
542 | case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break; | |
543 | case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break; | |
544 | case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break; | |
545 | case PAR_EL1: *val = read_sysreg_par(); break; | |
546 | case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break; | |
547 | case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break; | |
548 | case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break; | |
549 | default: return false; | |
550 | } | |
551 | ||
552 | return true; | |
553 | } | |
554 | ||
555 | static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg) | |
556 | { | |
557 | /* | |
558 | * *** VHE ONLY *** | |
559 | * | |
560 | * System registers listed in the switch are not restored on every | |
561 | * entry to the guest but are only restored on vcpu_load. | |
562 | * | |
563 | * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but | |
564 | * should never be listed below, because the MPIDR should only be set | |
565 | * once, before running the VCPU, and never changed later. | |
566 | */ | |
567 | if (!has_vhe()) | |
568 | return false; | |
569 | ||
570 | switch (reg) { | |
571 | case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break; | |
572 | case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break; | |
573 | case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break; | |
574 | case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break; | |
575 | case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break; | |
576 | case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break; | |
577 | case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break; | |
578 | case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break; | |
579 | case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break; | |
580 | case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break; | |
581 | case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break; | |
582 | case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break; | |
583 | case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break; | |
584 | case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break; | |
585 | case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break; | |
586 | case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break; | |
587 | case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break; | |
588 | case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break; | |
589 | case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break; | |
590 | case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break; | |
591 | case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break; | |
592 | case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break; | |
593 | case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break; | |
594 | default: return false; | |
595 | } | |
596 | ||
597 | return true; | |
598 | } | |
599 | ||
4f8d6632 | 600 | struct kvm_vm_stat { |
0193cc90 | 601 | struct kvm_vm_stat_generic generic; |
4f8d6632 MZ |
602 | }; |
603 | ||
604 | struct kvm_vcpu_stat { | |
0193cc90 | 605 | struct kvm_vcpu_stat_generic generic; |
8a7e75d4 | 606 | u64 hvc_exit_stat; |
b19e6892 AT |
607 | u64 wfe_exit_stat; |
608 | u64 wfi_exit_stat; | |
609 | u64 mmio_exit_user; | |
610 | u64 mmio_exit_kernel; | |
fe5161d2 | 611 | u64 signal_exits; |
b19e6892 | 612 | u64 exits; |
4f8d6632 MZ |
613 | }; |
614 | ||
08e873cb | 615 | void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); |
4f8d6632 MZ |
616 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
617 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | |
4f8d6632 MZ |
618 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
619 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | |
6ac4a5ac MZ |
620 | |
621 | unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu); | |
622 | int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); | |
623 | int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | |
624 | int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | |
625 | ||
539aee0e JM |
626 | int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, |
627 | struct kvm_vcpu_events *events); | |
b7b27fac | 628 | |
539aee0e JM |
629 | int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, |
630 | struct kvm_vcpu_events *events); | |
4f8d6632 MZ |
631 | |
632 | #define KVM_ARCH_WANT_MMU_NOTIFIER | |
4f8d6632 | 633 | |
b13216cf CD |
634 | void kvm_arm_halt_guest(struct kvm *kvm); |
635 | void kvm_arm_resume_guest(struct kvm *kvm); | |
4f8d6632 | 636 | |
cc5705fb MZ |
637 | #define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid) |
638 | ||
40a50853 | 639 | #ifndef __KVM_NVHE_HYPERVISOR__ |
05469831 | 640 | #define kvm_call_hyp_nvhe(f, ...) \ |
f50b6f6a | 641 | ({ \ |
05469831 AS |
642 | struct arm_smccc_res res; \ |
643 | \ | |
644 | arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), \ | |
645 | ##__VA_ARGS__, &res); \ | |
646 | WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \ | |
647 | \ | |
648 | res.a1; \ | |
f50b6f6a AS |
649 | }) |
650 | ||
18fc7bf8 MZ |
651 | /* |
652 | * The couple of isb() below are there to guarantee the same behaviour | |
653 | * on VHE as on !VHE, where the eret to EL1 acts as a context | |
654 | * synchronization event. | |
655 | */ | |
656 | #define kvm_call_hyp(f, ...) \ | |
657 | do { \ | |
658 | if (has_vhe()) { \ | |
659 | f(__VA_ARGS__); \ | |
660 | isb(); \ | |
661 | } else { \ | |
f50b6f6a | 662 | kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \ |
18fc7bf8 MZ |
663 | } \ |
664 | } while(0) | |
665 | ||
666 | #define kvm_call_hyp_ret(f, ...) \ | |
667 | ({ \ | |
668 | typeof(f(__VA_ARGS__)) ret; \ | |
669 | \ | |
670 | if (has_vhe()) { \ | |
671 | ret = f(__VA_ARGS__); \ | |
672 | isb(); \ | |
673 | } else { \ | |
05469831 | 674 | ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \ |
18fc7bf8 MZ |
675 | } \ |
676 | \ | |
677 | ret; \ | |
678 | }) | |
40a50853 QP |
679 | #else /* __KVM_NVHE_HYPERVISOR__ */ |
680 | #define kvm_call_hyp(f, ...) f(__VA_ARGS__) | |
681 | #define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__) | |
682 | #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__) | |
683 | #endif /* __KVM_NVHE_HYPERVISOR__ */ | |
22b39ca3 | 684 | |
cf5d3188 | 685 | void force_vm_exit(const cpumask_t *mask); |
4f8d6632 | 686 | |
74cc7e0c TZ |
687 | int handle_exit(struct kvm_vcpu *vcpu, int exception_index); |
688 | void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index); | |
4f8d6632 | 689 | |
6ac4a5ac MZ |
690 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu); |
691 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu); | |
692 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu); | |
693 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu); | |
694 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu); | |
695 | int kvm_handle_sys_reg(struct kvm_vcpu *vcpu); | |
9369bc5c | 696 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu); |
6ac4a5ac MZ |
697 | |
698 | void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); | |
699 | ||
f1f0c0cf | 700 | int kvm_sys_reg_table_init(void); |
6ac4a5ac | 701 | |
0e20f5e2 MZ |
702 | /* MMIO helpers */ |
703 | void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); | |
704 | unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); | |
705 | ||
74cc7e0c TZ |
706 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu); |
707 | int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa); | |
0e20f5e2 | 708 | |
e1bfc245 SC |
709 | /* |
710 | * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event, | |
711 | * arrived in guest context. For arm64, any event that arrives while a vCPU is | |
712 | * loaded is considered to be "in guest". | |
713 | */ | |
714 | static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) | |
715 | { | |
716 | return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; | |
717 | } | |
718 | ||
b48c1a45 | 719 | long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu); |
8564d637 SP |
720 | gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu); |
721 | void kvm_update_stolen_time(struct kvm_vcpu *vcpu); | |
722 | ||
004a0124 | 723 | bool kvm_arm_pvtime_supported(void); |
58772e9a SP |
724 | int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu, |
725 | struct kvm_device_attr *attr); | |
726 | int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu, | |
727 | struct kvm_device_attr *attr); | |
728 | int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, | |
729 | struct kvm_device_attr *attr); | |
730 | ||
f8051e96 | 731 | extern unsigned int kvm_arm_vmid_bits; |
41783839 SK |
732 | int kvm_arm_vmid_alloc_init(void); |
733 | void kvm_arm_vmid_alloc_free(void); | |
734 | void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); | |
100b4f09 | 735 | void kvm_arm_vmid_clear_active(void); |
41783839 | 736 | |
8564d637 SP |
/*
 * Reset a vCPU's stolen-time state: an invalid base GPA marks pvtime as
 * disabled (see kvm_arm_is_pvtime_enabled(), which tests this field).
 */
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}
741 | ||
742 | static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch) | |
743 | { | |
744 | return (vcpu_arch->steal.base != GPA_INVALID); | |
745 | } | |
b48c1a45 | 746 | |
b7b27fac DG |
747 | void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); |
748 | ||
4429fc64 AP |
749 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); |
750 | ||
14ef9d04 | 751 | DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data); |
4464e210 | 752 | |
/*
 * Initialise the host's per-CPU sysreg context by caching MPIDR_EL1.
 */
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}
758 | ||
5bdf3437 JM |
/*
 * True when this system requires id-mapped hyp vectors: exactly the
 * Spectre-v3A-affected case, per the ARM64_SPECTRE_V3A capability.
 */
static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}
763 | ||
384b40ca MR |
764 | void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); |
765 | ||
0865e636 RK |
/* These generic-KVM arch hooks need no work on arm64: empty stubs. */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
769 | ||
56c7f5e7 | 770 | void kvm_arm_init_debug(void); |
263d6287 | 771 | void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); |
56c7f5e7 AB |
772 | void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); |
773 | void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); | |
84e690bf | 774 | void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); |
7dabf02f OU |
775 | |
776 | #define kvm_vcpu_os_lock_enabled(vcpu) \ | |
777 | (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK)) | |
778 | ||
bb0c70bc SZ |
779 | int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, |
780 | struct kvm_device_attr *attr); | |
781 | int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, | |
782 | struct kvm_device_attr *attr); | |
783 | int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, | |
784 | struct kvm_device_attr *attr); | |
56c7f5e7 | 785 | |
f0376edb SP |
786 | long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, |
787 | struct kvm_arm_copy_mte_tags *copy_tags); | |
788 | ||
e6b673b7 DM |
789 | /* Guest/host FPSIMD coordination helpers */ |
790 | int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); | |
791 | void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); | |
af9a0e21 | 792 | void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu); |
e6b673b7 DM |
793 | void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); |
794 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); | |
52b28657 | 795 | void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu); |
e6b673b7 | 796 | |
eb41238c AM |
797 | static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) |
798 | { | |
435e53fb | 799 | return (!has_vhe() && attr->exclude_host); |
eb41238c AM |
800 | } |
801 | ||
d2602bb4 SP |
802 | /* Flags for host debug state */ |
803 | void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); | |
804 | void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); | |
805 | ||
/* PMU event set/clear: real implementations only when KVM is built. */
#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
#else
/* !CONFIG_KVM: no-op stubs so callers need no conditional compilation. */
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
17eed27b | 813 | |
13aeb9b4 DB |
814 | void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu); |
815 | void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu); | |
bc192cee | 816 | |
b130a8f7 | 817 | int kvm_set_ipa_limit(void); |
0f62f0e9 | 818 | |
d1e5b0e9 MO |
819 | #define __KVM_HAVE_ARCH_VM_ALLOC |
820 | struct kvm *kvm_arch_alloc_vm(void); | |
d1e5b0e9 | 821 | |
bca607eb | 822 | int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); |
5b6c6742 | 823 | |
2ea7f655 FT |
/* Stub: no VM is treated as protected here — unconditionally false. */
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}
828 | ||
2a0c3433 FT |
829 | void kvm_init_protected_traps(struct kvm_vcpu *vcpu); |
830 | ||
92e68b2b | 831 | int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); |
9033bba4 DM |
832 | bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); |
833 | ||
834 | #define kvm_arm_vcpu_sve_finalized(vcpu) \ | |
835 | ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) | |
7dd32a0d | 836 | |
06394531 MZ |
837 | #define kvm_has_mte(kvm) \ |
838 | (system_supports_mte() && \ | |
839 | test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags)) | |
14bda7a9 | 840 | |
a8e190cd | 841 | int kvm_trng_call(struct kvm_vcpu *vcpu); |
f320bc74 QP |
#ifdef CONFIG_KVM
/* Hypervisor memory carveout, populated by kvm_hyp_reserve() at init. */
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
/* !CONFIG_KVM: nothing to reserve for the hypervisor. */
static inline void kvm_hyp_reserve(void) { }
#endif
a8e190cd | 849 | |
1e579429 | 850 | void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu); |
b171f9bb | 851 | bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); |
1e579429 | 852 | |
4f8d6632 | 853 | #endif /* __ARM64_KVM_HOST_H__ */ |