/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <asm/smp_plat.h>
#include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>
#ifdef CONFIG_ARM_GIC_V3
#define KVM_MAX_VCPUS		VGIC_V3_MAX_CPUS
#else
#define KVM_MAX_VCPUS		VGIC_V2_MAX_CPUS
#endif
#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
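
/*
 * Illustrative only, not part of this header: vcpu requests such as the
 * two above are raised and consumed through the generic KVM request API.
 * A minimal sketch of the expected usage (the surrounding logic is
 * hypothetical):
 *
 *	// Ask a vcpu to re-evaluate its pending interrupts:
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// In the vcpu run loop, consume a request:
 *	if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
 *		;	// block the vcpu until it is woken up
 */
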
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64	vmid_gen;
	u32	vmid;
};

struct kvm_arch {
	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	struct kvm_vmid vmid;

	/* Stage-2 page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

	/* Interrupt controller */
	struct vgic_dist	vgic;
	int max_vcpus;

	/* Mandated version of PSCI */
	u32 psci_version;
};
#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
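
/*
 * Illustrative sketch, not part of this header: the cache is topped up
 * before the mmu lock is taken and consumed while holding it, so the
 * fault path never has to allocate under the spinlock. The refill loop
 * below is a simplified, hypothetical rendition of that pattern:
 *
 *	// Outside the lock: refill up to a watermark, may sleep.
 *	while (cache->nobjs < min)
 *		cache->objects[cache->nobjs++] =
 *			(void *)__get_free_page(GFP_KERNEL);
 *
 *	// Under the lock: allocation cannot fail, just pop an object.
 *	void *obj = cache->objects[--cache->nobjs];
 */
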
struct kvm_vcpu_fault_info {
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */
};
/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	c0_MPIDR,		/* MultiProcessor ID Register */
	c0_CSSELR,		/* Cache Size Selection Register */
	c1_SCTLR,		/* System Control Register */
	c1_ACTLR,		/* Auxiliary Control Register */
	c1_CPACR,		/* Coprocessor Access Control */
	c2_TTBR0,		/* Translation Table Base Register 0 */
	c2_TTBR0_high,		/* TTBR0 top 32 bits */
	c2_TTBR1,		/* Translation Table Base Register 1 */
	c2_TTBR1_high,		/* TTBR1 top 32 bits */
	c2_TTBCR,		/* Translation Table Base Control Register */
	c3_DACR,		/* Domain Access Control Register */
	c5_DFSR,		/* Data Fault Status Register */
	c5_IFSR,		/* Instruction Fault Status Register */
	c5_ADFSR,		/* Auxiliary Data Fault Status Register */
	c5_AIFSR,		/* Auxiliary Instruction Fault Status Register */
	c6_DFAR,		/* Data Fault Address Register */
	c6_IFAR,		/* Instruction Fault Address Register */
	c7_PAR,			/* Physical Address Register */
	c7_PAR_high,		/* PAR top 32 bits */
	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
	c10_PRRR,		/* Primary Region Remap Register */
	c10_NMRR,		/* Normal Memory Remap Register */
	c12_VBAR,		/* Vector Base Address Register */
	c13_CID,		/* Context ID Register */
	c13_TID_URW,		/* Thread ID, User R/W */
	c13_TID_URO,		/* Thread ID, User R/O */
	c13_TID_PRIV,		/* Thread ID, Privileged */
	c14_CNTKCTL,		/* Timer Control Register (PL1) */
	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
	NR_CP15_REGS		/* Number of regs (incl. invalid) */
};
struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	struct vfp_hard_struct vfp;
	u32 cp15[NR_CP15_REGS];
};

typedef struct kvm_cpu_context kvm_cpu_context_t;
static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
					     int cpu)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
}
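
/*
 * For illustration only: boot code is expected to initialise one host
 * context per possible CPU, roughly as below. The per-cpu variable name
 * host_ctxt is hypothetical:
 *
 *	for_each_possible_cpu(cpu)
 *		kvm_init_host_cpu_context(&per_cpu(host_ctxt, cpu), cpu);
 */
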
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* The CPU type we expose to the VM */
	u32 midr;

	/* HYP trapping configuration */
	u32 hcr;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Host FP context */
	kvm_cpu_context_t *host_cpu_context;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Detect first run of a vcpu */
	bool has_run_once;
};
struct kvm_vm_stat {
	ulong remote_tlb_flush;
};
struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};
#define vcpu_cp15(v, r)	(v)->arch.ctxt.cp15[r]
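
/*
 * Example (illustrative): combined with the enum indices above, the
 * accessor lets a trap handler read or update a guest cp15 register:
 *
 *	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 *	vcpu_cp15(vcpu, c5_DFSR) = new_dfsr;	// new_dfsr is hypothetical
 */
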
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long __kvm_call_hyp(void *hypfn, ...);

/*
 * The has_vhe() part doesn't get emitted, but is used for type-checking.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
		}							\
	} while (0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
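
/*
 * Usage sketch (illustrative): has_vhe() is constant-false on 32bit ARM,
 * so the f(__VA_ARGS__) branch is compiled out, yet it still forces the
 * arguments to be type-checked against f's prototype. Typical call sites
 * look like:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 */
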
void force_vm_exit(const cpumask_t *mask);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
				     int exception_index) {}
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
	/*
	 * Call initialization code, and switch to the full blown HYP
	 * code. The init code doesn't need to preserve these
	 * registers, as r0-r3 are caller-saved according to
	 * the AAPCS.
	 * Note that we slightly misuse the prototype by casting the
	 * stack pointer to a void *.
	 *
	 * The PGDs are always passed as the third argument, in order
	 * to be passed into r2-r3 to the init code (yes, this is
	 * compliant with the PCS!).
	 */
	__kvm_call_hyp((void *)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}
static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	return 0;
}
int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
/*
 * VFP/NEON switching is all done by the hyp switch code, so no need to
 * coordinate with host context handling for this state:
 */
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}

static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}
static inline bool kvm_arm_harden_branch_predictor(void)
{
	switch (read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_CORTEX_A17:
		return true;
#endif
	default:
		return false;
	}
}
#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3
static inline int kvm_arm_have_ssbd(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return KVM_SSBD_UNKNOWN;
}
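
/*
 * Illustrative only: a caller probing the Spectre-v4 (SSBD) mitigation
 * state might branch on the constants above along these lines:
 *
 *	switch (kvm_arm_have_ssbd()) {
 *	case KVM_SSBD_KERNEL:		// dynamic, per-vcpu mitigation
 *	case KVM_SSBD_FORCE_ENABLE:	// mitigation always on
 *		break;			// expose/apply the workaround
 *	default:			// unknown, forced off, or unaffected
 *		break;			// nothing to set up
 *	}
 */
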
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	/*
	 * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
	 * so any non-zero value used as type is illegal.
	 */
	if (type)
		return -EINVAL;

	return 0;
}
#endif /* __ARM_KVM_HOST_H__ */