/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/smp_plat.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000
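
/*
 * Note: KVM_HALT_POLL_NS_DEFAULT is the initial busy-wait window (in ns)
 * that the generic halt-polling code spins for before a WFI actually
 * blocks the vcpu; the core kvm module grows/shrinks it at runtime via
 * its halt_poll_ns* module parameters.
 */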

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 4

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
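
/*
 * These requests go through the generic vcpu request API, e.g.
 * (illustrative):
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and are consumed on the run loop with kvm_check_request(); KVM_REQ_SLEEP
 * parks the vcpu until a wake-up event unparks it.
 */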

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;
};

struct kvm_arch {
	struct kvm_vmid vmid;

	/* stage2 entry level table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
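
/*
 * A minimal usage sketch: the stage-2 fault handlers (see
 * virt/kvm/arm/mmu.c) top the cache up before taking the mmu lock and
 * then allocate from it while holding the lock, roughly:
 *
 *	mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
 *			       min, KVM_NR_MEM_OBJS);
 *	...
 *	pte = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 *
 * (helper names as in that file; illustrative only)
 */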

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* 32bit specific registers.  Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

/* 32bit mapping */
#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */

#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
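
/*
 * Note on the encoding above: the 32-bit c*_ and cp14_* indices are
 * (64-bit enum index * 2) because the AArch32 view treats each 64-bit
 * sys_regs[] slot as a pair of 32-bit words (see the copro[] union in
 * struct kvm_cpu_context below); the *_high variants pick the upper word.
 */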

#define NR_COPRO_REGS	(NR_SYS_REGS * 2)

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
	};

	struct kvm_vcpu *__hyp_running_vcpu;
};

typedef struct kvm_cpu_context kvm_cpu_context_t;

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches.  vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them.  host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches.  external_debug_state contains the debug
	 * values we want to debug the guest.  This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;
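
	/*
	 * Sketch of how debug_ptr is managed: kvm_arm_reset_debug_ptr()
	 * points it at vcpu_debug_state, and kvm_arm_setup_debug() redirects
	 * it to external_debug_state while userspace debugs the guest with
	 * KVM_SET_GUEST_DEBUG/KVM_GUESTDBG_USE_HW (see the prototypes for
	 * both helpers further down this header).
	 */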

	/* Pointer to host CPU context */
	kvm_cpu_context_t *host_cpu_context;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
	bool sysregs_loaded_on_cpu;
};

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
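
/*
 * For instance, the FP/SIMD switch code tests and updates these bits on
 * vcpu_arch.flags, along the lines of:
 *
 *	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
 *		__fpsimd_save_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
 *
 * (illustrative only; see the hyp switch and fpsimd code for the real logic)
 */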

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)

/*
 * Only use __vcpu_sys_reg if you know you want the memory backed version of a
 * register, and not the one most recently accessed by a running VCPU.  For
 * example, for userspace access or for system registers that are never
 * context switched, but only emulated.
 */
#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
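
/*
 * A minimal sketch of the distinction: with VHE, a loaded vcpu's EL1 state
 * may live in the CPU registers (see sysregs_loaded_on_cpu above), so
 *
 *	val = vcpu_read_sys_reg(vcpu, SCTLR_EL1);	// CPU or memory
 *	val = __vcpu_sys_reg(vcpu, SCTLR_EL1);		// always memory
 *
 * can return different values while the vcpu is running.
 */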

/*
 * CP14 and CP15 live in the same array, as they are backed by the
 * same system registers.
 */
#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])
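
/*
 * Illustrative use (little-endian layout): the same storage backs both
 * views, so
 *
 *	__vcpu_sys_reg(vcpu, TTBR0_EL1)		// 64-bit view
 *	vcpu_cp15(vcpu, c2_TTBR0)		// low 32 bits
 *	vcpu_cp15(vcpu, c2_TTBR0_high)		// top 32 bits
 *
 * all alias the same slot of the register file.
 */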

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

u64 __kvm_call_hyp(void *hypfn, ...);

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
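
/*
 * Typical call sites (illustrative): the callee is either invoked directly
 * (VHE) or via an HVC to EL2 (!VHE), e.g.
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *	vtr = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
 */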

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);

static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
					     int cpu)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
}

void __kvm_enable_ssbs(void);

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Calculate the raw per-cpu offset without a translation from the
	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
			 (u64)kvm_ksym_ref(kvm_host_cpu_state));

	/*
	 * Call initialization code, and switch to the full blown HYP code.
	 * If the cpucaps haven't been finalized yet, something has gone very
	 * wrong, and hyp will crash and burn when it uses any
	 * cpus_have_const_cap() wrapper.
	 */
	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);

	/*
	 * Disabling SSBD on a non-VHE system requires us to enable SSBS
	 * at EL2.
	 */
	if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		kvm_call_hyp(__kvm_enable_ssbs);
	}
}

static inline bool kvm_arch_requires_vhe(void)
{
	/*
	 * The Arm architecture specifies that implementation of SVE
	 * requires VHE also to be implemented.  The KVM code for arm64
	 * relies on this when SVE is present:
	 */
	if (system_supports_sve())
		return true;

	/* Some implementations have defects that confine them to VHE */
	if (cpus_have_cap(ARM64_WORKAROUND_1165522))
		return true;

	return false;
}

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

static inline void __cpu_init_stage2(void) {}

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}
#endif

static inline void kvm_arm_vhe_guest_enter(void)
{
	local_daif_mask();
}

static inline void kvm_arm_vhe_guest_exit(void)
{
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps.  Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();
}

static inline bool kvm_arm_harden_branch_predictor(void)
{
	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

static inline int kvm_arm_have_ssbd(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return KVM_SSBD_FORCE_DISABLE;
	case ARM64_SSBD_KERNEL:
		return KVM_SSBD_KERNEL;
	case ARM64_SSBD_FORCE_ENABLE:
		return KVM_SSBD_FORCE_ENABLE;
	case ARM64_SSBD_MITIGATED:
		return KVM_SSBD_MITIGATED;
	case ARM64_SSBD_UNKNOWN:
	default:
		return KVM_SSBD_UNKNOWN;
	}
}
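
/*
 * A rough map of how this is consumed (illustrative): the hyp switch code
 * only toggles the SSBD mitigation state around guest entry/exit when
 * kvm_arm_have_ssbd() == KVM_SSBD_KERNEL; the FORCE_* and MITIGATED states
 * require no per-vcpu work.
 */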

void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

void kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

#endif /* __ARM64_KVM_HOST_H__ */