Commit | Line | Data |
---|---|---|
d94d71cb | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
749cf76c CD |
2 | /* |
3 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
4 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
749cf76c CD |
5 | */ |
6 | ||
7 | #ifndef __ARM_KVM_HOST_H__ | |
8 | #define __ARM_KVM_HOST_H__ | |
9 | ||
b48c1a45 | 10 | #include <linux/arm-smccc.h> |
7dd32a0d | 11 | #include <linux/errno.h> |
65647300 PB |
12 | #include <linux/types.h> |
13 | #include <linux/kvm_types.h> | |
add56098 | 14 | #include <asm/cputype.h> |
749cf76c CD |
15 | #include <asm/kvm.h> |
16 | #include <asm/kvm_asm.h> | |
f7ed45be | 17 | #include <asm/fpstate.h> |
7275acdf | 18 | #include <kvm/arm_arch_timer.h> |
749cf76c | 19 | |
c1426e4c EA |
20 | #define __KVM_HAVE_ARCH_INTC_INITIALIZED |
21 | ||
2b5e1e47 | 22 | #define KVM_USER_MEM_SLOTS 32 |
1138245c | 23 | #define KVM_HAVE_ONE_REG |
920552b2 | 24 | #define KVM_HALT_POLL_NS_DEFAULT 500000 |
749cf76c | 25 | |
7d0f84aa | 26 | #define KVM_VCPU_MAX_FEATURES 2 |
749cf76c | 27 | |
7275acdf | 28 | #include <kvm/arm_vgic.h> |
1a89dd91 | 29 | |
acda5430 VM |
30 | |
31 | #ifdef CONFIG_ARM_GIC_V3 | |
32 | #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS | |
33 | #else | |
ef748917 | 34 | #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS |
acda5430 | 35 | #endif |
ef748917 | 36 | |
7b244e2b | 37 | #define KVM_REQ_SLEEP \ |
2387149e | 38 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
325f9c64 | 39 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
358b28f0 | 40 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) |
8564d637 | 41 | #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3) |
b13216cf | 42 | |
61bbe380 CD |
43 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
44 | ||
/* SVE does not exist on 32bit ARM; initialisation trivially succeeds. */
static inline int kvm_arm_init_sve(void) { return 0; }
0f062bfe | 46 | |
749cf76c | 47 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); |
6951e48b | 48 | int __attribute_const__ kvm_target_cpu(void); |
749cf76c CD |
49 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
50 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu); | |
51 | ||
e329fb75 CD |
52 | struct kvm_vmid { |
53 | /* The VMID generation used for the virt. memory system */ | |
54 | u64 vmid_gen; | |
55 | u32 vmid; | |
56 | }; | |
749cf76c | 57 | |
e329fb75 | 58 | struct kvm_arch { |
94d0e598 MZ |
59 | /* The last vcpu id that ran on each physical CPU */ |
60 | int __percpu *last_vcpu_ran; | |
61 | ||
749cf76c CD |
62 | /* |
63 | * Anything that is not used directly from assembly code goes | |
64 | * here. | |
65 | */ | |
66 | ||
67 | /* The VMID generation used for the virt. memory system */ | |
e329fb75 | 68 | struct kvm_vmid vmid; |
749cf76c CD |
69 | |
70 | /* Stage-2 page table */ | |
71 | pgd_t *pgd; | |
e329fb75 | 72 | phys_addr_t pgd_phys; |
1a89dd91 MZ |
73 | |
74 | /* Interrupt controller */ | |
75 | struct vgic_dist vgic; | |
3caa2d8c | 76 | int max_vcpus; |
85bd0ba1 MZ |
77 | |
78 | /* Mandated version of PSCI */ | |
79 | u32 psci_version; | |
c726200d CD |
80 | |
81 | /* | |
82 | * If we encounter a data abort without valid instruction syndrome | |
83 | * information, report this to user space. User space can (and | |
84 | * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is | |
85 | * supported. | |
86 | */ | |
87 | bool return_nisv_io_abort_to_user; | |
749cf76c CD |
88 | }; |
89 | ||
#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
100 | ||
7393b599 MZ |
101 | struct kvm_vcpu_fault_info { |
102 | u32 hsr; /* Hyp Syndrome Register */ | |
103 | u32 hxfar; /* Hyp Data/Inst. Fault Address Register */ | |
104 | u32 hpfar; /* Hyp IPA Fault Address Register */ | |
7393b599 MZ |
105 | }; |
106 | ||
/*
 * 0 is reserved as an invalid value.
 * Order should be kept in sync with the save/restore code.
 */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	c0_MPIDR,		/* MultiProcessor ID Register */
	c0_CSSELR,		/* Cache Size Selection Register */
	c1_SCTLR,		/* System Control Register */
	c1_ACTLR,		/* Auxiliary Control Register */
	c1_CPACR,		/* Coprocessor Access Control */
	c2_TTBR0,		/* Translation Table Base Register 0 */
	c2_TTBR0_high,		/* TTBR0 top 32 bits */
	c2_TTBR1,		/* Translation Table Base Register 1 */
	c2_TTBR1_high,		/* TTBR1 top 32 bits */
	c2_TTBCR,		/* Translation Table Base Control R. */
	c3_DACR,		/* Domain Access Control Register */
	c5_DFSR,		/* Data Fault Status Register */
	c5_IFSR,		/* Instruction Fault Status Register */
	c5_ADFSR,		/* Auxiliary Data Fault Status R */
	c5_AIFSR,		/* Auxiliary Instruction Fault Status R */
	c6_DFAR,		/* Data Fault Address Register */
	c6_IFAR,		/* Instruction Fault Address Register */
	c7_PAR,			/* Physical Address Register */
	c7_PAR_high,		/* PAR top 32 bits */
	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
	c10_PRRR,		/* Primary Region Remap Register */
	c10_NMRR,		/* Normal Memory Remap Register */
	c12_VBAR,		/* Vector Base Address Register */
	c13_CID,		/* Context ID Register */
	c13_TID_URW,		/* Thread ID, User R/W */
	c13_TID_URO,		/* Thread ID, User R/O */
	c13_TID_PRIV,		/* Thread ID, Privileged */
	c14_CNTKCTL,		/* Timer Control Register (PL1) */
	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
	NR_CP15_REGS		/* Number of regs (incl. invalid) */
};
145 | ||
0ca5565d | 146 | struct kvm_cpu_context { |
c2a8dab5 | 147 | struct kvm_regs gp_regs; |
0ca5565d | 148 | struct vfp_hard_struct vfp; |
fb32a52a | 149 | u32 cp15[NR_CP15_REGS]; |
0ca5565d MZ |
150 | }; |
151 | ||
630a1685 AM |
152 | struct kvm_host_data { |
153 | struct kvm_cpu_context host_ctxt; | |
154 | }; | |
155 | ||
156 | typedef struct kvm_host_data kvm_host_data_t; | |
9c7a6432 | 157 | |
1e0cf16c | 158 | static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt) |
32f13955 MZ |
159 | { |
160 | /* The host's MPIDR is immutable, so let's set it up at boot time */ | |
1e0cf16c | 161 | cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr(); |
32f13955 MZ |
162 | } |
163 | ||
/* Pending reset parameters for a vcpu (consumed by kvm_reset_vcpu()). */
struct vcpu_reset_state {
	unsigned long	pc;	/* entry point after reset */
	unsigned long	r0;	/* context argument in r0 */
	bool		be;	/* start in big-endian mode */
	bool		reset;	/* a reset is pending */
};
170 | ||
749cf76c | 171 | struct kvm_vcpu_arch { |
0ca5565d MZ |
172 | struct kvm_cpu_context ctxt; |
173 | ||
749cf76c CD |
174 | int target; /* Processor target */ |
175 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | |
176 | ||
749cf76c CD |
177 | /* The CPU type we expose to the VM */ |
178 | u32 midr; | |
179 | ||
ac30a11e MZ |
180 | /* HYP trapping configuration */ |
181 | u32 hcr; | |
182 | ||
749cf76c | 183 | /* Exception Information */ |
7393b599 | 184 | struct kvm_vcpu_fault_info fault; |
749cf76c | 185 | |
3de50da6 | 186 | /* Host FP context */ |
630a1685 | 187 | struct kvm_cpu_context *host_cpu_context; |
f7ed45be | 188 | |
1a89dd91 MZ |
189 | /* VGIC state */ |
190 | struct vgic_cpu vgic_cpu; | |
53e72406 | 191 | struct arch_timer_cpu timer_cpu; |
1a89dd91 | 192 | |
f7ed45be CD |
193 | /* |
194 | * Anything that is not used directly from assembly code goes | |
195 | * here. | |
196 | */ | |
5b3e5e5b | 197 | |
3781528e EA |
198 | /* vcpu power-off state */ |
199 | bool power_off; | |
aa024c2f | 200 | |
3b92830a EA |
201 | /* Don't run the guest (internal implementation need) */ |
202 | bool pause; | |
203 | ||
749cf76c CD |
204 | /* Cache some mmu pages needed inside spinlock regions */ |
205 | struct kvm_mmu_memory_cache mmu_page_cache; | |
f7ed45be | 206 | |
358b28f0 MZ |
207 | struct vcpu_reset_state reset_state; |
208 | ||
f7ed45be CD |
209 | /* Detect first run of a vcpu */ |
210 | bool has_run_once; | |
749cf76c CD |
211 | }; |
212 | ||
213 | struct kvm_vm_stat { | |
8a7e75d4 | 214 | ulong remote_tlb_flush; |
749cf76c CD |
215 | }; |
216 | ||
217 | struct kvm_vcpu_stat { | |
8a7e75d4 SJS |
218 | u64 halt_successful_poll; |
219 | u64 halt_attempted_poll; | |
220 | u64 halt_poll_invalid; | |
221 | u64 halt_wakeup; | |
222 | u64 hvc_exit_stat; | |
b19e6892 AT |
223 | u64 wfe_exit_stat; |
224 | u64 wfi_exit_stat; | |
225 | u64 mmio_exit_user; | |
226 | u64 mmio_exit_kernel; | |
227 | u64 exits; | |
749cf76c CD |
228 | }; |
229 | ||
fb32a52a MZ |
230 | #define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r] |
231 | ||
4a6fee80 | 232 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); |
749cf76c CD |
233 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
234 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | |
749cf76c CD |
235 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
236 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | |
7aa8d146 | 237 | |
unsigned long __kvm_call_hyp(void *hypfn, ...);

/*
 * Invoke a HYP function: directly when running VHE, via the HVC
 * trampoline otherwise.
 *
 * The has_vhe() part doesn't get emitted, but is used for type-checking.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
		} else {						\
			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__);	\
		}							\
	} while(0)

/* Same as kvm_call_hyp(), but propagates the callee's return value. */
#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
		} else {						\
			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
					     ##__VA_ARGS__);		\
		}							\
									\
		ret;							\
	})
7aa8d146 | 265 | |
f7ed45be | 266 | void force_vm_exit(const cpumask_t *mask); |
b0960b95 JM |
267 | int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, |
268 | struct kvm_vcpu_events *events); | |
269 | ||
270 | int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, | |
271 | struct kvm_vcpu_events *events); | |
d5d8184d CD |
272 | |
273 | #define KVM_ARCH_WANT_MMU_NOTIFIER | |
d5d8184d CD |
274 | int kvm_unmap_hva_range(struct kvm *kvm, |
275 | unsigned long start, unsigned long end); | |
748c0e31 | 276 | int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
d5d8184d | 277 | |
1138245c CD |
278 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
279 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | |
35307b9a MZ |
280 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); |
281 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); | |
1138245c | 282 | |
b13216cf CD |
283 | void kvm_arm_halt_guest(struct kvm *kvm); |
284 | void kvm_arm_resume_guest(struct kvm *kvm); | |
1638a12d MZ |
285 | |
286 | int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); | |
287 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); | |
1638a12d MZ |
288 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); |
289 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); | |
290 | ||
3414bbff MZ |
291 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, |
292 | int exception_index); | |
293 | ||
/* No early (pre-irq-enable) exit handling is needed on 32bit ARM. */
static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
				     int exception_index) {}
296 | ||
0e20f5e2 MZ |
297 | /* MMIO helpers */ |
298 | void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); | |
299 | unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); | |
300 | ||
301 | int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); | |
302 | int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |
303 | phys_addr_t fault_ipa); | |
304 | ||
12fda812 | 305 | static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, |
e7858c58 MZ |
306 | unsigned long hyp_stack_ptr, |
307 | unsigned long vector_ptr) | |
308 | { | |
e7858c58 | 309 | /* |
5a677ce0 MZ |
310 | * Call initialization code, and switch to the full blown HYP |
311 | * code. The init code doesn't need to preserve these | |
312 | * registers as r0-r3 are already callee saved according to | |
313 | * the AAPCS. | |
cd602a37 | 314 | * Note that we slightly misuse the prototype by casting the |
5a677ce0 | 315 | * stack pointer to a void *. |
5a677ce0 | 316 | |
cd602a37 MZ |
317 | * The PGDs are always passed as the third argument, in order |
318 | * to be passed into r2-r3 to the init code (yes, this is | |
319 | * compliant with the PCS!). | |
320 | */ | |
5a677ce0 | 321 | |
d18232ea | 322 | __kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); |
e7858c58 MZ |
323 | } |
324 | ||
35a2491a MZ |
325 | static inline void __cpu_init_stage2(void) |
326 | { | |
d4c7688c | 327 | kvm_call_hyp(__init_stage2_translation); |
35a2491a MZ |
328 | } |
329 | ||
/* No per-VM ioctl extensions exist on 32bit ARM. */
static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	return 0;
}
334 | ||
210552c1 MZ |
335 | int kvm_perf_init(void); |
336 | int kvm_perf_teardown(void); | |
337 | ||
b48c1a45 SP |
338 | static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu) |
339 | { | |
340 | return SMCCC_RET_NOT_SUPPORTED; | |
341 | } | |
342 | ||
8564d637 SP |
343 | static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu) |
344 | { | |
345 | return GPA_INVALID; | |
346 | } | |
347 | ||
/* No-op: stolen-time accounting is not supported on 32bit ARM. */
static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
}
351 | ||
/* No-op: there is no pvtime state to initialise on 32bit ARM. */
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
}
355 | ||
/* pvtime can never be enabled on 32bit ARM. */
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return false;
}
360 | ||
c6473555 MS |
361 | void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); |
362 | ||
4429fc64 AP |
363 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); |
364 | ||
/* Generic KVM arch hooks that need no work on 32bit ARM. */
static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
0865e636 | 371 | |
/* Guest debug support is not implemented on 32bit ARM. */
static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
2227e439 CD |
376 | |
377 | int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, | |
378 | struct kvm_device_attr *attr); | |
379 | int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, | |
380 | struct kvm_device_attr *attr); | |
381 | int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, | |
382 | struct kvm_device_attr *attr); | |
56c7f5e7 | 383 | |
/*
 * VFP/NEON switching is all done by the hyp switch code, so no need to
 * coordinate with host context handling for this state:
 */
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
17eed27b | 391 | |
/* No PMU context to swap on 32bit ARM. */
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
394 | ||
c118bbb5 AP |
395 | #define KVM_BP_HARDEN_UNKNOWN -1 |
396 | #define KVM_BP_HARDEN_WA_NEEDED 0 | |
397 | #define KVM_BP_HARDEN_NOT_REQUIRED 1 | |
398 | ||
399 | static inline int kvm_arm_harden_branch_predictor(void) | |
6167ec5c | 400 | { |
add56098 RK |
401 | switch(read_cpuid_part()) { |
402 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR | |
403 | case ARM_CPU_PART_BRAHMA_B15: | |
404 | case ARM_CPU_PART_CORTEX_A12: | |
405 | case ARM_CPU_PART_CORTEX_A15: | |
406 | case ARM_CPU_PART_CORTEX_A17: | |
c118bbb5 | 407 | return KVM_BP_HARDEN_WA_NEEDED; |
add56098 | 408 | #endif |
c118bbb5 AP |
409 | case ARM_CPU_PART_CORTEX_A7: |
410 | return KVM_BP_HARDEN_NOT_REQUIRED; | |
add56098 | 411 | default: |
c118bbb5 | 412 | return KVM_BP_HARDEN_UNKNOWN; |
add56098 | 413 | } |
6167ec5c MZ |
414 | } |
415 | ||
#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL			1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

/* SSBD mitigation state. */
static inline int kvm_arm_have_ssbd(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return KVM_SSBD_UNKNOWN;
}
427 | ||
/* No lazy sysreg switching on 32bit ARM. */
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
430 | ||
d1e5b0e9 MO |
431 | #define __KVM_HAVE_ARCH_VM_ALLOC |
432 | struct kvm *kvm_arch_alloc_vm(void); | |
433 | void kvm_arch_free_vm(struct kvm *kvm); | |
434 | ||
static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	/*
	 * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
	 * so any non-zero value used as type is illegal.
	 */
	return type ? -EINVAL : 0;
}
445 | ||
/* No vcpu features require finalization on 32bit ARM. */
static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	return -EINVAL;
}
450 | ||
/* With no finalizable features, every vcpu is trivially finalized. */
static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	return true;
}
7dd32a0d | 455 | |
749cf76c | 456 | #endif /* __ARM_KVM_HOST_H__ */ |