KVM: VMX: Rename "vmx/evmcs.{ch}" to "vmx/hyperv.{ch}"
arch/x86/kvm/vmx/vmx_ops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "hyperv.h"
#include "vmcs.h"
#include "../x86.h"

void vmread_error(unsigned long field, bool fault);
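/*
 * The trampoline is reached from inline asm with its arguments pushed on the
 * stack; regparm(0) forces stack-based argument passing (relevant on 32-bit
 * builds compiled with -mregparm).
 */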
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							 bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

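/*
 * VMCS field encodings (Intel SDM vol. 3, appendix B): bit 0 selects the
 * "high" access type for 64-bit fields, and bits 14:13 give the width
 * (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width).  The masks
 * 0x6000/0x6001 below test exactly those bits, at compile time when the
 * field is a constant.
 */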
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

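/*
 * Two VMREAD implementations: with CONFIG_CC_HAS_ASM_GOTO_OUTPUT the compiler
 * supports output operands in asm goto, so VM-fail and fault handling can use
 * plain C labels; otherwise the error path bounces through an assembly
 * trampoline that preserves volatile registers.
 */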
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
			  "jna %l[do_fail]\n\t"

			  _ASM_EXTABLE(1b, %l[do_exception])

			  : [output] "=r" (value)
			  : [field] "r" (field)
			  : "cc"
			  : do_fail, do_exception);

	return value;

do_fail:
	WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
	pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k1, %k1\n\t"
		     "2:\n\t"
		     "push %1\n\t"
		     "push %2\n\t"
		     "call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)

		     : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

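/*
 * When the enable_evmcs static key is set (KVM running as a Hyper-V guest),
 * the accessors below read/write the in-memory enlightened VMCS via the
 * evmcs_*() helpers instead of executing VMREAD/VMWRITE.
 */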
static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
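	/*
	 * 32-bit hosts read a 64-bit field as two 32-bit halves; the +1 in
	 * the encoding selects the "high" companion field.
	 */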
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

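/*
 * Illustrative usage (not part of this header), assuming field encodings
 * from <asm/vmx.h>:
 *
 *	u16 cs = vmcs_read16(GUEST_CS_SELECTOR);	  // 16-bit field
 *	u32 ctl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); // 32-bit field
 *	u64 bitmap = vmcs_read64(IO_BITMAP_A);		  // 64-bit field
 *	unsigned long rip = vmcs_readl(GUEST_RIP);	  // natural width
 *
 * A constant field of the wrong width trips the BUILD_BUG_ON_MSG() checks
 * above at compile time.
 */
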
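/*
 * Wrap a VMX instruction in asm goto based error handling: "jna" branches to
 * the error label on VM-fail (CF or ZF set), the exception table entry routes
 * a fault to kvm_spurious_fault(), and the VM-fail path calls the matching
 * <insn>_error() reporting helper.
 */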
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
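	/* 32-bit hosts write the upper half via the "high" companion field. */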
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

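/*
 * Read-modify-write helpers for control fields, e.g. (illustrative):
 *
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_RDTSC_EXITING);
 *
 * The mask is only 32 bits, hence 64-bit fields are rejected.
 */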
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

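/*
 * INVVPID takes a 128-bit descriptor in memory: VPID in bits 15:0, bits 63:16
 * reserved, and the linear address in the upper 64 bits (Intel SDM vol. 3,
 * "INVVPID - Invalidate Translations Based on VPID").
 */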
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

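/*
 * The INVEPT descriptor is the EPTP followed by a second quadword that the
 * SDM reserves as zero; callers here always pass 0 for @gpa.
 */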
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

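/*
 * Prefer a single-context invalidation; fall back to a global flush when the
 * CPU only supports global INVVPID.  If @vpid is zero, VPID is not in use and
 * there is nothing for INVVPID to flush.
 */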
static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */