arch/x86/kvm/vmx/hyperv.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_HYPERV_H
#define __KVM_X86_VMX_HYPERV_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "../hyperv.h"

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *      POSTED_INTR_NV                  = 0x00000002,
 *      GUEST_INTR_STATUS               = 0x00000810,
 *      APIC_ACCESS_ADDR                = 0x00002014,
 *      POSTED_INTR_DESC_ADDR           = 0x00002016,
 *      EOI_EXIT_BITMAP0                = 0x0000201c,
 *      EOI_EXIT_BITMAP1                = 0x0000201e,
 *      EOI_EXIT_BITMAP2                = 0x00002020,
 *      EOI_EXIT_BITMAP3                = 0x00002022,
 *      GUEST_PML_INDEX                 = 0x00000812,
 *      PML_ADDRESS                     = 0x0000200e,
 *      VM_FUNCTION_CONTROL             = 0x00002018,
 *      EPTP_LIST_ADDRESS               = 0x00002024,
 *      VMREAD_BITMAP                   = 0x00002026,
 *      VMWRITE_BITMAP                  = 0x00002028,
 *
 *      TSC_MULTIPLIER                  = 0x00002032,
 *      PLE_GAP                         = 0x00004020,
 *      PLE_WINDOW                      = 0x00004022,
 *      VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *
 * Currently unsupported in KVM:
 *      GUEST_IA32_RTIT_CTL             = 0x00002814,
 */
#define EVMCS1_SUPPORTED_PINCTRL                                        \
        (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |                          \
         PIN_BASED_EXT_INTR_MASK |                                      \
         PIN_BASED_NMI_EXITING |                                        \
         PIN_BASED_VIRTUAL_NMIS)

#define EVMCS1_SUPPORTED_EXEC_CTRL                                      \
        (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |                          \
         CPU_BASED_HLT_EXITING |                                        \
         CPU_BASED_CR3_LOAD_EXITING |                                   \
         CPU_BASED_CR3_STORE_EXITING |                                  \
         CPU_BASED_UNCOND_IO_EXITING |                                  \
         CPU_BASED_MOV_DR_EXITING |                                     \
         CPU_BASED_USE_TSC_OFFSETTING |                                 \
         CPU_BASED_MWAIT_EXITING |                                      \
         CPU_BASED_MONITOR_EXITING |                                    \
         CPU_BASED_INVLPG_EXITING |                                     \
         CPU_BASED_RDPMC_EXITING |                                      \
         CPU_BASED_INTR_WINDOW_EXITING |                                \
         CPU_BASED_CR8_LOAD_EXITING |                                   \
         CPU_BASED_CR8_STORE_EXITING |                                  \
         CPU_BASED_RDTSC_EXITING |                                      \
         CPU_BASED_TPR_SHADOW |                                         \
         CPU_BASED_USE_IO_BITMAPS |                                     \
         CPU_BASED_MONITOR_TRAP_FLAG |                                  \
         CPU_BASED_USE_MSR_BITMAPS |                                    \
         CPU_BASED_NMI_WINDOW_EXITING |                                 \
         CPU_BASED_PAUSE_EXITING |                                      \
         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)

#define EVMCS1_SUPPORTED_2NDEXEC                                        \
        (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |                        \
         SECONDARY_EXEC_WBINVD_EXITING |                                \
         SECONDARY_EXEC_ENABLE_VPID |                                   \
         SECONDARY_EXEC_ENABLE_EPT |                                    \
         SECONDARY_EXEC_UNRESTRICTED_GUEST |                            \
         SECONDARY_EXEC_DESC |                                          \
         SECONDARY_EXEC_ENABLE_RDTSCP |                                 \
         SECONDARY_EXEC_ENABLE_INVPCID |                                \
         SECONDARY_EXEC_XSAVES |                                        \
         SECONDARY_EXEC_RDSEED_EXITING |                                \
         SECONDARY_EXEC_RDRAND_EXITING |                                \
         SECONDARY_EXEC_TSC_SCALING |                                   \
         SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |                         \
         SECONDARY_EXEC_PT_USE_GPA |                                    \
         SECONDARY_EXEC_PT_CONCEAL_VMX |                                \
         SECONDARY_EXEC_BUS_LOCK_DETECTION |                            \
         SECONDARY_EXEC_NOTIFY_VM_EXITING |                             \
         SECONDARY_EXEC_ENCLS_EXITING)

#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)

#define EVMCS1_SUPPORTED_VMEXIT_CTRL                                    \
        (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |                            \
         VM_EXIT_SAVE_DEBUG_CONTROLS |                                  \
         VM_EXIT_ACK_INTR_ON_EXIT |                                     \
         VM_EXIT_HOST_ADDR_SPACE_SIZE |                                 \
         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |                           \
         VM_EXIT_SAVE_IA32_PAT |                                        \
         VM_EXIT_LOAD_IA32_PAT |                                        \
         VM_EXIT_SAVE_IA32_EFER |                                       \
         VM_EXIT_LOAD_IA32_EFER |                                       \
         VM_EXIT_CLEAR_BNDCFGS |                                        \
         VM_EXIT_PT_CONCEAL_PIP |                                       \
         VM_EXIT_CLEAR_IA32_RTIT_CTL)

#define EVMCS1_SUPPORTED_VMENTRY_CTRL                                   \
        (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |                           \
         VM_ENTRY_LOAD_DEBUG_CONTROLS |                                 \
         VM_ENTRY_IA32E_MODE |                                          \
         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |                          \
         VM_ENTRY_LOAD_IA32_PAT |                                       \
         VM_ENTRY_LOAD_IA32_EFER |                                      \
         VM_ENTRY_LOAD_BNDCFGS |                                        \
         VM_ENTRY_PT_CONCEAL_PIP |                                      \
         VM_ENTRY_LOAD_IA32_RTIT_CTL)

#define EVMCS1_SUPPORTED_VMFUNC (0)
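
/*
 * Illustrative note (not part of the original header): the EVMCS1_SUPPORTED_*
 * masks above describe which control bits may be exposed to a guest that uses
 * eVMCSv1. A consumer such as the control-MSR filtering code is expected to
 * AND the allowed-1 half of the advertised MSR value against the relevant
 * mask, roughly:
 *
 *      u32 ctl_high = (u32)(msr_value >> 32);
 *
 *      ctl_high &= EVMCS1_SUPPORTED_2NDEXEC;
 *      msr_value = ((u64)ctl_high << 32) | (u32)msr_value;
 *
 * See nested_evmcs_filter_control_msr(), declared at the bottom of this file,
 * for the real filtering logic.
 */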

struct evmcs_field {
        u16 offset;
        u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

static __always_inline int evmcs_field_offset(unsigned long field,
                                              u16 *clean_field)
{
        unsigned int index = ROL16(field, 6);
        const struct evmcs_field *evmcs_field;

        if (unlikely(index >= nr_evmcs_1_fields))
                return -ENOENT;

        evmcs_field = &vmcs_field_to_evmcs_1[index];

        /*
         * Use offset=0 to detect holes in eVMCS. This offset belongs to
         * 'revision_id' but this field has no encoding and is supposed to
         * be accessed directly.
         */
        if (unlikely(!evmcs_field->offset))
                return -ENOENT;

        if (clean_field)
                *clean_field = evmcs_field->clean_field;

        return evmcs_field->offset;
}
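
/*
 * Illustrative example (not part of the original header): like KVM's vmcs12
 * field-offset tables, the lookup above rotates the 16-bit VMCS field
 * encoding left by 6 and uses the result as a direct index into the sparse
 * vmcs_field_to_evmcs_1[] table (unused slots keep offset 0, hence the hole
 * check). E.g. for GUEST_RIP, encoding 0x681e:
 *
 *      ROL16(0x681e, 6) == ((0x681e << 6) | (0x681e >> 10)) & 0xffff
 *                       == 0x0780 | 0x001a
 *                       == 0x079a
 *
 * so the GUEST_RIP entry, if present, lives at index 0x079a.
 */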

static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
                                 unsigned long field, u16 offset)
{
        /*
         * vmcs12_read_any() doesn't care whether the supplied structure
         * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
         * the exact offset of the required field; use it for convenience
         * here.
         */
        return vmcs12_read_any((void *)evmcs, field, offset);
}
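
/*
 * Illustrative usage (an assumption, not code from this file): a caller
 * emulating a field read against a mapped eVMCS would resolve the offset
 * first and then go through this helper, roughly:
 *
 *      int offset = evmcs_field_offset(field, NULL);
 *
 *      if (offset >= 0)
 *              value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
 */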

#if IS_ENABLED(CONFIG_HYPERV)

static __always_inline int get_evmcs_offset(unsigned long field,
                                            u16 *clean_field)
{
        int offset = evmcs_field_offset(field, clean_field);

        WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
        return offset;
}

static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u64 *)((char *)current_evmcs + offset) = value;

        current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write32(unsigned long field, u32 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u32 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}

static __always_inline void evmcs_write16(unsigned long field, u16 value)
{
        u16 clean_field;
        int offset = get_evmcs_offset(field, &clean_field);

        if (offset < 0)
                return;

        *(u16 *)((char *)current_evmcs + offset) = value;
        current_evmcs->hv_clean_fields &= ~clean_field;
}
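
/*
 * Illustrative sketch (an assumption about the callers, not code from this
 * file): the generic VMCS accessors in vmx_ops.h are expected to divert to
 * the helpers above when the enable_evmcs static key is on, roughly:
 *
 *      if (static_branch_unlikely(&enable_evmcs))
 *              return evmcs_write64(field, value);
 *      __vmcs_writel(field, value);
 *
 * so writes land in the software eVMCS and only clear the matching
 * hv_clean_fields bit instead of executing VMWRITE.
 */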

static __always_inline u64 evmcs_read64(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u64 *)((char *)current_evmcs + offset);
}

static __always_inline u32 evmcs_read32(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u32 *)((char *)current_evmcs + offset);
}

static __always_inline u16 evmcs_read16(unsigned long field)
{
        int offset = get_evmcs_offset(field, NULL);

        if (offset < 0)
                return 0;

        return *(u16 *)((char *)current_evmcs + offset);
}

static inline void evmcs_load(u64 phys_addr)
{
        struct hv_vp_assist_page *vp_ap =
                hv_get_vp_assist_page(smp_processor_id());

        if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
                vp_ap->nested_control.features.directhypercall = 1;
        vp_ap->current_nested_vmcs = phys_addr;
        vp_ap->enlighten_vmentry = 1;
}
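
/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): when KVM itself runs on Hyper-V, loading a VMCS is expected to go
 * through the VP assist page rather than VMPTRLD, roughly:
 *
 *      u64 phys_addr = __pa(vmcs);
 *
 *      if (static_branch_unlikely(&enable_evmcs))
 *              return evmcs_load(phys_addr);
 *      ... otherwise execute a real VMPTRLD ...
 *
 * The hypervisor then picks up the eVMCS on the next enlightened VM entry.
 */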

void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

#define EVMPTR_INVALID (-1ULL)
#define EVMPTR_MAP_PENDING (-2ULL)

static inline bool evmptr_is_valid(u64 evmptr)
{
        return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}
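
/*
 * Illustrative note (an assumption, not from the original header): the nested
 * code tracks the guest's eVMCS GPA in vmx->nested.hv_evmcs_vmptr and uses the
 * two sentinels above for "no eVMCS in use" and "eVMCS announced but not yet
 * mapped" (e.g. right after migration), so a typical guard looks like:
 *
 *      if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
 *              return;
 */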

enum nested_evmptrld_status {
        EVMPTRLD_DISABLED,
        EVMPTRLD_SUCCEEDED,
        EVMPTRLD_VMFAIL,
        EVMPTRLD_ERROR,
};
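
/*
 * Illustrative note (an assumption about the caller, not from this file):
 * nested VMX entry code is expected to map the guest's eVMCS and translate
 * the outcome into this enum, roughly:
 *
 *      enum nested_evmptrld_status status;
 *
 *      status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
 *      if (status == EVMPTRLD_ERROR)
 *              ... inject #UD ...
 *      else if (status == EVMPTRLD_VMFAIL)
 *              ... signal VMfailInvalid to the guest ...
 */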

u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
                        uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
bool nested_evmcs_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu);
void vmx_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_HYPERV_H */