Commit | Line | Data |
---|---|---|
caab277b | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
c76a0a66 MZ |
2 | /* |
3 | * Copyright (C) 2015 - ARM Ltd | |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
c76a0a66 MZ |
5 | */ |
6 | ||
7 | #ifndef __ARM64_KVM_HYP_H__ | |
8 | #define __ARM64_KVM_HYP_H__ | |
9 | ||
10 | #include <linux/compiler.h> | |
11 | #include <linux/kvm_host.h> | |
1e4448c5 | 12 | #include <asm/alternative.h> |
c76a0a66 MZ |
13 | #include <asm/sysreg.h> |
14 | ||
b619d9aa | 15 | DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); |
a0e47952 | 16 | DECLARE_PER_CPU(unsigned long, kvm_hyp_vector); |
63fec243 | 17 | DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); |
a0e47952 | 18 | |
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#if defined(__KVM_VHE_HYPERVISOR__)

/*
 * VHE-only hypervisor: the kernel runs at EL2, so the choice of
 * encoding is known at compile time. Guest EL0/EL1 registers are
 * reached via their _EL02/_EL12 aliases, and "EL2" registers via
 * their redirected _EL1 encoding.
 */
#define read_sysreg_el0(r)	read_sysreg_s(r##_EL02)
#define write_sysreg_el0(v,r)	write_sysreg_s(v, r##_EL02)
#define read_sysreg_el1(r)	read_sysreg_s(r##_EL12)
#define write_sysreg_el1(v,r)	write_sysreg_s(v, r##_EL12)
#define read_sysreg_el2(r)	read_sysreg_s(r##_EL1)
#define write_sysreg_el2(v,r)	write_sysreg_s(v, r##_EL1)

#else // !__KVM_VHE_HYPERVISOR__

/*
 * The encoding must be selected at runtime, keyed on the alternatives
 * capability that indicates whether the hypervisor ends up running
 * with HCR_EL2.E2H set.
 */
#if defined(__KVM_NVHE_HYPERVISOR__)
#define VHE_ALT_KEY	ARM64_KVM_HVHE
#else
#define VHE_ALT_KEY	ARM64_HAS_VIRT_HOST_EXTN
#endif

/* Runtime-patched read: nVHE encoding by default, VHE alias otherwise. */
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 VHE_ALT_KEY)			\
			     : "=r" (reg));				\
		reg;							\
	})

/* Runtime-patched write; "rZ" allows XZR for a zero value. */
#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 VHE_ALT_KEY)			\
			     : : "rZ" (__val));				\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)

#endif // __KVM_VHE_HYPERVISOR__
69 | ||
/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline can allow the compiler to out-of-line this. KVM always wants
 * the macro version as its always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)
76 | ||
/* GICv2 cpuif MMIO access emulation from the hyp trap handler. */
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

/* GICv3 cpuif state save/restore and trap management. */
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
f68d2b1b | 86 | |
/* Timer trap control, only needed by the nVHE hypervisor. */
#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif
1431af36 | 91 | |
/*
 * System register context switch helpers. nVHE swaps the full state in
 * one go; VHE splits it into host/guest halves.
 */
#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif
6d6ec20f | 101 | |
/* Debug register context switch. */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif
109 | ||
c13d1683 MZ |
110 | void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); |
111 | void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); | |
52029198 | 112 | void __sve_restore_state(void *sve_pffr, u32 *fpsr); |
c13d1683 | 113 | |
/* VHE-only trap configuration, called on vcpu load/put. */
#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
#endif
a2465629 | 118 | |
b619d9aa | 119 | u64 __guest_enter(struct kvm_vcpu *vcpu); |
09cf57eb | 120 | |
373beef0 | 121 | bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id); |
eeeee719 | 122 | |
09cf57eb | 123 | #ifdef __KVM_NVHE_HYPERVISOR__ |
c4b000c3 AS |
124 | void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr, |
125 | u64 elr, u64 par); | |
09cf57eb | 126 | #endif |
b97b66c1 | 127 | |
/* Protected KVM (pKVM) initialisation entry points, nVHE only. */
#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
			    phys_addr_t pgd, void *sp, void *cont_fn);
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif
135 | ||
6c30bfb1 FT |
136 | extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val); |
137 | extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val); | |
138 | extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val); | |
139 | extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val); | |
def8c222 | 140 | extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val); |
7c419937 MZ |
141 | extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val); |
142 | extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val); | |
6c30bfb1 | 143 | extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val); |
8669651c | 144 | extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val); |
7c419937 | 145 | |
13e248aa | 146 | extern unsigned long kvm_nvhe_sym(__icache_flags); |
73f38ef2 WD |
147 | extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits); |
148 | ||
c76a0a66 | 149 | #endif /* __ARM64_KVM_HYP_H__ */ |