/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/smp_plat.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_arch {
	struct kvm_vmid vmid;

	/* stage2 entry level table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
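/*
 * Editor's note, illustrative sketch (not part of the original header):
 * callers top this cache up with GFP_KERNEL allocations *before* taking
 * the stage-2 page-table spinlock, so that a page fault handled under
 * the lock never needs to allocate. Roughly:
 *
 *	while (cache->nobjs < KVM_NR_MEM_OBJS) {
 *		void *page = (void *)__get_free_page(GFP_KERNEL);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		cache->objects[cache->nobjs++] = page;
 *	}
 */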
struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};
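/*
 * Editor's note (illustrative): this is the snapshot of the fault taken
 * at guest exit; handlers decode it after the world switch, e.g. the
 * exception class via ESR_ELx_EC(vcpu->arch.fault.esr_el2).
 */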
/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)
struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

typedef struct kvm_host_data kvm_host_data_t;

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug
	 * values we want to use while debugging the guest. This is set
	 * via the KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* Pointer to host CPU context */
	struct kvm_cpu_context *host_cpu_context;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32 mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs.
	 */
	bool sysregs_loaded_on_cpu;
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
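/*
 * Editor's note, usage sketch (not part of the original header): the SVE
 * register storage pointed to by arch.sve_state is sized with this macro
 * when the vcpu is finalized, roughly:
 *
 *	buf = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	vcpu->arch.sve_state = buf;
 */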
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
				  system_supports_generic_auth()) && \
				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)

/*
 * Only use __vcpu_sys_reg if you know you want the memory backed version of a
 * register, and not the one most recently accessed by a running VCPU. For
 * example, for userspace access or for system registers that are never context
 * switched, but only emulated.
 */
#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
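/*
 * Editor's note (illustrative only): userspace accessors and registers
 * that are never loaded on the CPU use the memory-backed copy directly,
 * while emulation of a possibly-running vcpu goes through the accessors
 * above, which read/write the hardware register when the vcpu's sysregs
 * are loaded:
 *
 *	u64 mpidr = __vcpu_sys_reg(vcpu, MPIDR_EL1);	// backing store
 *	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);	// may read hardware
 *	vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);	// may write hardware
 */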
/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])
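/*
 * Editor's note (illustrative): a 64-bit sysreg at index r overlays the
 * two 32-bit copro slots 2*r and 2*r + 1, which is what the c*_ and
 * cp14_* indices defined earlier encode. On a little-endian host, e.g.:
 *
 *	u32 dfsr = vcpu_cp15(vcpu, c5_DFSR);	// low half of ESR_EL1
 */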
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
u64 __kvm_call_hyp(void *hypfn, ...);

/*
 * The isb() instructions below guarantee the same behaviour on VHE as
 * on !VHE, where the eret to EL1 acts as a context synchronization
 * event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
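/*
 * Editor's note, usage sketch (not part of the original header):
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 *
 * On VHE the function runs directly, since the kernel already executes
 * at EL2; on !VHE it is invoked through the HVC-based __kvm_call_hyp()
 * trampoline.
 */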
void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
					     int cpu)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
}

void __kvm_enable_ssbs(void);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
			 (u64)kvm_ksym_ref(kvm_host_data));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		kvm_call_hyp(__kvm_enable_ssbs);
	}
}
static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented. The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	/* Some implementations have defects that confine them to VHE */
	if (cpus_have_cap(ARM64_WORKAROUND_1165522))
		return true;

	return false;
}
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void) {}

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
static inline void kvm_arm_vhe_guest_enter(void)
{
	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal lower-priority interrupts to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON);
		dsb(sy);
	}
}

static inline void kvm_arm_vhe_guest_exit(void)
{
	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps. Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();
}
static inline bool kvm_arm_harden_branch_predictor(void)
{
	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return KVM_SSBD_FORCE_DISABLE;
	case ARM64_SSBD_KERNEL:
		return KVM_SSBD_KERNEL;
	case ARM64_SSBD_FORCE_ENABLE:
		return KVM_SSBD_FORCE_ENABLE;
	case ARM64_SSBD_MITIGATED:
		return KVM_SSBD_MITIGATED;
	case ARM64_SSBD_UNKNOWN:
	default:
		return KVM_SSBD_UNKNOWN;
	}
}

void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

void kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#endif /* __ARM64_KVM_HOST_H__ */