/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

/*
 * CR0/CR4 bits that KVM may allow the guest to own, i.e. that may live in
 * hardware (VMCS/VMCB) rather than in the cached vcpu->arch value.
 */
#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

/*
 * Per their names, these appear to be the CR0/CR4 bits whose writes require
 * reloading the PDPTRs or flushing the TLB — confirm against the users of
 * these masks.
 */
#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

/* CR0 bits that affect the PDPTRs must never be guest-owned. */
static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

/*
 * Generate kvm_<reg>_read()/kvm_<reg>_write() accessors for each GPR.
 * These touch vcpu->arch.regs[] directly with no avail/dirty bookkeeping;
 * callers are responsible for any required register-cache tracking.
 */
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
/* r8-r15 exist only in 64-bit mode. */
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

41e68b69 PB |
46 | /* |
47 | * avail dirty | |
48 | * 0 0 register in VMCS/VMCB | |
49 | * 0 1 *INVALID* | |
50 | * 1 0 register in vcpu->arch | |
51 | * 1 1 register in vcpu->arch, needs to be stored back | |
52 | */ | |
aed89418 SC |
53 | static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu, |
54 | enum kvm_reg reg) | |
55 | { | |
56 | return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); | |
57 | } | |
58 | ||
59 | static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu, | |
60 | enum kvm_reg reg) | |
61 | { | |
62 | return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); | |
63 | } | |
64 | ||
65 | static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu, | |
66 | enum kvm_reg reg) | |
67 | { | |
68 | __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); | |
69 | } | |
70 | ||
71 | static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, | |
72 | enum kvm_reg reg) | |
73 | { | |
74 | __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); | |
75 | __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); | |
76 | } | |
77 | ||
fc9465be SC |
78 | /* |
79 | * kvm_register_test_and_mark_available() is a special snowflake that uses an | |
80 | * arch bitop directly to avoid the explicit instrumentation that comes with | |
81 | * the generic bitops. This allows code that cannot be instrumented (noinstr | |
82 | * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers. | |
83 | */ | |
84 | static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu, | |
85 | enum kvm_reg reg) | |
86 | { | |
87 | return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); | |
88 | } | |
89 | ||
27b4a9c4 SC |
90 | /* |
91 | * The "raw" register helpers are only for cases where the full 64 bits of a | |
92 | * register are read/written irrespective of current vCPU mode. In other words, | |
93 | * odds are good you shouldn't be using the raw variants. | |
94 | */ | |
95 | static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg) | |
5fdbf976 | 96 | { |
489cbcf0 SC |
97 | if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) |
98 | return 0; | |
99 | ||
cb3c1e2f | 100 | if (!kvm_register_is_available(vcpu, reg)) |
b3646477 | 101 | static_call(kvm_x86_cache_reg)(vcpu, reg); |
5fdbf976 MT |
102 | |
103 | return vcpu->arch.regs[reg]; | |
104 | } | |
105 | ||
/*
 * Write the full 64-bit value of a GPR and mark it dirty so it gets
 * propagated back to the VMCS/VMCB.  See the "raw" caveat on
 * kvm_register_read_raw().
 */
static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	/* Reject out-of-range register indices (loudly, once). */
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

/* Read the guest's RIP, filling the register cache if necessary. */
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

/* Write the guest's RIP and mark it dirty for write-back. */
static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

/* Read the guest's RSP, filling the register cache if necessary. */
static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

/* Write the guest's RSP and mark it dirty for write-back. */
static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

/*
 * Read a cached PDPTR for the current walk MMU, lazily loading all four
 * PDPTRs from the vendor code if the cache is cold.  May sleep (on SVM),
 * hence the might_sleep() before any work is done.
 */
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

6dba9403 ML |
146 | static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value) |
147 | { | |
148 | vcpu->arch.walk_mmu->pdptrs[index] = value; | |
149 | } | |
150 | ||
4d4ec087 AK |
151 | static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) |
152 | { | |
8ae09912 | 153 | ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; |
bd31fe49 SC |
154 | if ((tmask & vcpu->arch.cr0_guest_owned_bits) && |
155 | !kvm_register_is_available(vcpu, VCPU_EXREG_CR0)) | |
b3646477 | 156 | static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0); |
4d4ec087 AK |
157 | return vcpu->arch.cr0 & mask; |
158 | } | |
159 | ||
607475cf BW |
160 | static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, |
161 | unsigned long cr0_bit) | |
162 | { | |
163 | BUILD_BUG_ON(!is_power_of_2(cr0_bit)); | |
164 | ||
165 | return !!kvm_read_cr0_bits(vcpu, cr0_bit); | |
166 | } | |
167 | ||
/* Read the guest's full CR0, refreshing guest-owned bits as needed. */
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

fc78f519 AK |
173 | static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) |
174 | { | |
8ae09912 | 175 | ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; |
f98c1e77 SC |
176 | if ((tmask & vcpu->arch.cr4_guest_owned_bits) && |
177 | !kvm_register_is_available(vcpu, VCPU_EXREG_CR4)) | |
b3646477 | 178 | static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4); |
fc78f519 AK |
179 | return vcpu->arch.cr4 & mask; |
180 | } | |
181 | ||
607475cf BW |
182 | static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, |
183 | unsigned long cr4_bit) | |
184 | { | |
185 | BUILD_BUG_ON(!is_power_of_2(cr4_bit)); | |
186 | ||
187 | return !!kvm_read_cr4_bits(vcpu, cr4_bit); | |
188 | } | |
189 | ||
9f8fe504 AK |
190 | static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) |
191 | { | |
cb3c1e2f | 192 | if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) |
b3646477 | 193 | static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3); |
9f8fe504 AK |
194 | return vcpu->arch.cr3; |
195 | } | |
196 | ||
/* Read the guest's full CR4, refreshing guest-owned bits as needed. */
static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

2acf923e DC |
202 | static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu) |
203 | { | |
de3cd117 SC |
204 | return (kvm_rax_read(vcpu) & -1u) |
205 | | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32); | |
2acf923e DC |
206 | } |
207 | ||
/* Enter nested guest mode: set the hflag and mirror it into the stats. */
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

/*
 * Leave nested guest mode: clear the hflag, flush any EOI-exitmap update
 * that was deferred while in guest mode, and clear the stats flag.
 */
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	/* Apply an EOI-exitmap load that was postponed during L2. */
	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

226 | static inline bool is_guest_mode(struct kvm_vcpu *vcpu) | |
227 | { | |
228 | return vcpu->arch.hflags & HF_GUEST_MASK; | |
229 | } | |

#endif /* ASM_KVM_CACHE_REGS_H */