/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/stringify.h>

#include "../kvm_util.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;

#define NMI_VECTOR              0x02

#define X86_EFLAGS_FIXED         (1u << 1)

#define X86_CR4_VME             (1ul << 0)
#define X86_CR4_PVI             (1ul << 1)
#define X86_CR4_TSD             (1ul << 2)
#define X86_CR4_DE              (1ul << 3)
#define X86_CR4_PSE             (1ul << 4)
#define X86_CR4_PAE             (1ul << 5)
#define X86_CR4_MCE             (1ul << 6)
#define X86_CR4_PGE             (1ul << 7)
#define X86_CR4_PCE             (1ul << 8)
#define X86_CR4_OSFXSR          (1ul << 9)
#define X86_CR4_OSXMMEXCPT      (1ul << 10)
#define X86_CR4_UMIP            (1ul << 11)
#define X86_CR4_LA57            (1ul << 12)
#define X86_CR4_VMXE            (1ul << 13)
#define X86_CR4_SMXE            (1ul << 14)
#define X86_CR4_FSGSBASE        (1ul << 16)
#define X86_CR4_PCIDE           (1ul << 17)
#define X86_CR4_OSXSAVE         (1ul << 18)
#define X86_CR4_SMEP            (1ul << 20)
#define X86_CR4_SMAP            (1ul << 21)
#define X86_CR4_PKE             (1ul << 22)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2.  Eww. */
enum cpuid_output_regs {
        KVM_CPUID_EAX,
        KVM_CPUID_EBX,
        KVM_CPUID_ECX,
        KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
        u32     function;
        u16     index;
        u8      reg;
        u8      bit;
};
#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)                                \
({                                                                              \
        struct kvm_x86_cpu_feature feature = {                                  \
                .function = fn,                                                 \
                .index = idx,                                                   \
                .reg = KVM_CPUID_##gpr,                                         \
                .bit = __bit,                                                   \
        };                                                                      \
                                                                                \
        kvm_static_assert((fn & 0xc0000000) == 0 ||                             \
                          (fn & 0xc0000000) == 0x40000000 ||                    \
                          (fn & 0xc0000000) == 0x80000000 ||                    \
                          (fn & 0xc0000000) == 0xc0000000);                     \
        kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));    \
        feature;                                                                \
})
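
/*
 * Illustrative example (mirrors X86_FEATURE_XSAVE below): CPUID.0x1.0:ECX[26]
 * is packed as
 *
 *	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
 *	  => { .function = 0x1, .index = 0, .reg = KVM_CPUID_ECX, .bit = 26 }
 */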

/*
 * Basic Leafs, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT               KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX                 KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX                 KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM                KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID                KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC              KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE               KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER  KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE               KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE             KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND              KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR          KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE                 KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE                 KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC                KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH             KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM                 KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2                KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE            KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST          KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX                 KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE                 KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP                KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID             KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM                 KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX                 KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP                KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT             KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT          KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB                KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP                KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU                 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_SHSTK               KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_LA57                KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID               KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC              KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_PKS                 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_IBT                 KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE            KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL           KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES   KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_XTILECFG            KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA           KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES              KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD                 KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)

/*
 * Extended Leafs, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM                 KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_NX                  KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES             KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP              KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM                  KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_INVTSC              KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define X86_FEATURE_RDPRU               KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB            KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT                 KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV                KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS               KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR          KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER         KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD         KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF                KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_SEV                 KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES              KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE     KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY    KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP          KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2    KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF        KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME      KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI          KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT       KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH    KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI     KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL    KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD  KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT    KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE        KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL       KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
        u32     function;
        u8      index;
        u8      reg;
        u8      lo_bit;
        u8      hi_bit;
};
#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)                   \
({                                                                              \
        struct kvm_x86_cpu_property property = {                                \
                .function = fn,                                                 \
                .index = idx,                                                   \
                .reg = KVM_CPUID_##gpr,                                         \
                .lo_bit = low_bit,                                              \
                .hi_bit = high_bit,                                             \
        };                                                                      \
                                                                                \
        kvm_static_assert(low_bit < high_bit);                                  \
        kvm_static_assert((fn & 0xc0000000) == 0 ||                             \
                          (fn & 0xc0000000) == 0x40000000 ||                    \
                          (fn & 0xc0000000) == 0x80000000 ||                    \
                          (fn & 0xc0000000) == 0xc0000000);                     \
        kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));   \
        property;                                                               \
})
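
/*
 * Illustrative example (mirrors X86_PROPERTY_MAX_PHY_ADDR below):
 * CPUID.0x80000008.0:EAX[7:0] is packed as
 *
 *	KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
 *	  => { .function = 0x80000008, .index = 0, .reg = KVM_CPUID_EAX,
 *	       .lo_bit = 0, .hi_bit = 7 }
 */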

#define X86_PROPERTY_MAX_BASIC_LEAF             KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION                KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS         KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH  KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)

#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0       KVM_X86_CPU_PROPERTY(0xd,  0, EBX,  0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE            KVM_X86_CPU_PROPERTY(0xd,  0, ECX,  0, 31)
#define X86_PROPERTY_XSTATE_TILE_SIZE           KVM_X86_CPU_PROPERTY(0xd, 18, EAX,  0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET         KVM_X86_CPU_PROPERTY(0xd, 18, EBX,  0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES       KVM_X86_CPU_PROPERTY(0x1d, 1, EAX,  0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE         KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW          KVM_X86_CPU_PROPERTY(0x1d, 1, EBX,  0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS           KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS               KVM_X86_CPU_PROPERTY(0x1d, 1, ECX,  0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF               KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF               KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR               KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR              KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION        KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)

#define X86_PROPERTY_MAX_CENTAUR_LEAF           KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.
 *
 * Wrap the "unavailable" feature to simplify checking whether or not a given
 * architectural event is supported.
 */
struct kvm_x86_pmu_feature {
        struct kvm_x86_cpu_feature anti_feature;
};
#define KVM_X86_PMU_FEATURE(name, __bit)                                        \
({                                                                              \
        struct kvm_x86_pmu_feature feature = {                                  \
                .anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),        \
        };                                                                      \
                                                                                \
        feature;                                                                \
})

#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED    KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)

static inline unsigned int x86_family(unsigned int eax)
{
        unsigned int x86;

        x86 = (eax >> 8) & 0xf;

        if (x86 == 0xf)
                x86 += (eax >> 20) & 0xff;

        return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
        return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
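
/*
 * Worked example (hypothetical FMS value): for eax = 0x000806ec,
 * x86_family() yields (0x000806ec >> 8) & 0xf = 0x6 (base family != 0xf, so
 * no extended family is added), and x86_model() yields
 * ((0x000806ec >> 12) & 0xf0) | ((0x000806ec >> 4) & 0x0f) = 0x8e.
 */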

/* Page table bitfield declarations */
#define PTE_PRESENT_MASK        BIT_ULL(0)
#define PTE_WRITABLE_MASK       BIT_ULL(1)
#define PTE_USER_MASK           BIT_ULL(2)
#define PTE_ACCESSED_MASK       BIT_ULL(5)
#define PTE_DIRTY_MASK          BIT_ULL(6)
#define PTE_LARGE_MASK          BIT_ULL(7)
#define PTE_GLOBAL_MASK         BIT_ULL(8)
#define PTE_NX_MASK             BIT_ULL(63)

#define PHYSICAL_PAGE_MASK      GENMASK_ULL(51, 12)

#define PAGE_SHIFT              12
#define PAGE_SIZE               (1ULL << PAGE_SHIFT)
#define PAGE_MASK               (~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)       (PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)        (1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)        (~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)         ((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)        (PTE_GET_PA(pte) >> PAGE_SHIFT)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
        u64 rax;
        u64 rcx;
        u64 rdx;
        u64 rbx;
        u64 rsp;
        u64 rbp;
        u64 rsi;
        u64 rdi;
        u64 r8;
        u64 r9;
        u64 r10;
        u64 r11;
        u64 r12;
        u64 r13;
        u64 r14;
        u64 r15;
};

struct desc64 {
        uint16_t limit0;
        uint16_t base0;
        unsigned base1:8, type:4, s:1, dpl:2, p:1;
        unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
        uint32_t base3;
        uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
        uint16_t size;
        uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
        struct kvm_xsave *xsave;
        struct kvm_vcpu_events events;
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        struct kvm_xcrs xcrs;
        struct kvm_sregs sregs;
        struct kvm_debugregs debugregs;
        union {
                struct kvm_nested_state nested;
                char nested_[16384];
        };
        struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
        return ((uint64_t)desc->base3 << 32) |
                (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
        uint32_t eax, edx;
        uint64_t tsc_val;
        /*
         * The lfence is to wait (on Intel CPUs) until all previous
         * instructions have been executed.  If software requires RDTSC to be
         * executed prior to execution of any subsequent instruction, it can
         * execute LFENCE immediately after RDTSC.
         */
        __asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
        tsc_val = ((uint64_t)edx) << 32 | eax;
        return tsc_val;
}
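
/*
 * Minimal usage sketch (illustrative only): measure a code sequence in TSC
 * cycles.  Assumes a constant/invariant TSC on the system under test.
 *
 *	uint64_t start, cycles;
 *
 *	start = rdtsc();
 *	<code under test>
 *	cycles = rdtsc() - start;
 */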

static inline uint64_t rdtscp(uint32_t *aux)
{
        uint32_t eax, edx;

        __asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
        return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
        uint32_t a, d;

        __asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

        return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
        uint32_t a = value;
        uint32_t d = value >> 32;

        __asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}

static inline uint16_t inw(uint16_t port)
{
        uint16_t tmp;

        __asm__ __volatile__("in %%dx, %%ax"
                : /* output */ "=a" (tmp)
                : /* input */ "d" (port));

        return tmp;
}

static inline uint16_t get_es(void)
{
        uint16_t es;

        __asm__ __volatile__("mov %%es, %[es]"
                             : /* output */ [es]"=rm"(es));
        return es;
}

static inline uint16_t get_cs(void)
{
        uint16_t cs;

        __asm__ __volatile__("mov %%cs, %[cs]"
                             : /* output */ [cs]"=rm"(cs));
        return cs;
}

static inline uint16_t get_ss(void)
{
        uint16_t ss;

        __asm__ __volatile__("mov %%ss, %[ss]"
                             : /* output */ [ss]"=rm"(ss));
        return ss;
}

static inline uint16_t get_ds(void)
{
        uint16_t ds;

        __asm__ __volatile__("mov %%ds, %[ds]"
                             : /* output */ [ds]"=rm"(ds));
        return ds;
}

static inline uint16_t get_fs(void)
{
        uint16_t fs;

        __asm__ __volatile__("mov %%fs, %[fs]"
                             : /* output */ [fs]"=rm"(fs));
        return fs;
}

static inline uint16_t get_gs(void)
{
        uint16_t gs;

        __asm__ __volatile__("mov %%gs, %[gs]"
                             : /* output */ [gs]"=rm"(gs));
        return gs;
}

static inline uint16_t get_tr(void)
{
        uint16_t tr;

        __asm__ __volatile__("str %[tr]"
                             : /* output */ [tr]"=rm"(tr));
        return tr;
}

static inline uint64_t get_cr0(void)
{
        uint64_t cr0;

        __asm__ __volatile__("mov %%cr0, %[cr0]"
                             : /* output */ [cr0]"=r"(cr0));
        return cr0;
}

static inline uint64_t get_cr3(void)
{
        uint64_t cr3;

        __asm__ __volatile__("mov %%cr3, %[cr3]"
                             : /* output */ [cr3]"=r"(cr3));
        return cr3;
}

static inline uint64_t get_cr4(void)
{
        uint64_t cr4;

        __asm__ __volatile__("mov %%cr4, %[cr4]"
                             : /* output */ [cr4]"=r"(cr4));
        return cr4;
}

static inline void set_cr4(uint64_t val)
{
        __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline struct desc_ptr get_gdt(void)
{
        struct desc_ptr gdt;
        __asm__ __volatile__("sgdt %[gdt]"
                             : /* output */ [gdt]"=m"(gdt));
        return gdt;
}

static inline struct desc_ptr get_idt(void)
{
        struct desc_ptr idt;
        __asm__ __volatile__("sidt %[idt]"
                             : /* output */ [idt]"=m"(idt));
        return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
        __asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
                           uint32_t *eax, uint32_t *ebx,
                           uint32_t *ecx, uint32_t *edx)
{
        *eax = function;
        *ecx = index;

        asm volatile("cpuid"
            : "=a" (*eax),
              "=b" (*ebx),
              "=c" (*ecx),
              "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

static inline void cpuid(uint32_t function,
                         uint32_t *eax, uint32_t *ebx,
                         uint32_t *ecx, uint32_t *edx)
{
        return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
        uint32_t eax, ebx, ecx, edx;

        cpuid(1, &eax, &ebx, &ecx, &edx);
        return eax;
}

static inline uint32_t this_cpu_family(void)
{
        return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
        return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
        const uint32_t *chunk = (const uint32_t *)vendor;
        uint32_t eax, ebx, ecx, edx;

        cpuid(0, &eax, &ebx, &ecx, &edx);
        return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
        return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
        return this_cpu_vendor_string_is("AuthenticAMD");
}

static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
                                      uint8_t reg, uint8_t lo, uint8_t hi)
{
        uint32_t gprs[4];

        __cpuid(function, index,
                &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
                &gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

        return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
        return __this_cpu_has(feature.function, feature.index,
                              feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
        return __this_cpu_has(property.function, property.index,
                              property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
        uint32_t max_leaf;

        switch (property.function & 0xc0000000) {
        case 0:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
                break;
        case 0x40000000:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
                break;
        case 0x80000000:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
                break;
        case 0xc0000000:
                max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
        }
        return max_leaf >= property.function;
}
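
/*
 * Usage sketch (illustrative): query a single-bit feature and a multi-bit
 * property, guarding the property read with this_cpu_has_p() since the leaf
 * isn't guaranteed to exist on all CPUs.
 *
 *	if (this_cpu_has(X86_FEATURE_XSAVE))
 *		...
 *	if (this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR))
 *		pa_bits = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 */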

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
        uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

        return nr_bits > feature.anti_feature.bit &&
               !this_cpu_has(feature.anti_feature);
}
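
/*
 * E.g. this_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED) is true iff the
 * EBX bit vector is at least 6 bits long (the event is bit 5, see above) and
 * CPUID.0xA:EBX[5] is '0', i.e. the event is NOT marked unavailable.
 */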

typedef u32             __attribute__((vector_size(16))) sse128_t;
#define __sse128_u      union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)    ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)    ({ __sse128_u t; t.vec = x; t.as_u64[1]; })

static inline void read_sse_reg(int reg, sse128_t *data)
{
        switch (reg) {
        case 0:
                asm("movdqa %%xmm0, %0" : "=m"(*data));
                break;
        case 1:
                asm("movdqa %%xmm1, %0" : "=m"(*data));
                break;
        case 2:
                asm("movdqa %%xmm2, %0" : "=m"(*data));
                break;
        case 3:
                asm("movdqa %%xmm3, %0" : "=m"(*data));
                break;
        case 4:
                asm("movdqa %%xmm4, %0" : "=m"(*data));
                break;
        case 5:
                asm("movdqa %%xmm5, %0" : "=m"(*data));
                break;
        case 6:
                asm("movdqa %%xmm6, %0" : "=m"(*data));
                break;
        case 7:
                asm("movdqa %%xmm7, %0" : "=m"(*data));
                break;
        default:
                BUG();
        }
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
        switch (reg) {
        case 0:
                asm("movdqa %0, %%xmm0" : : "m"(*data));
                break;
        case 1:
                asm("movdqa %0, %%xmm1" : : "m"(*data));
                break;
        case 2:
                asm("movdqa %0, %%xmm2" : : "m"(*data));
                break;
        case 3:
                asm("movdqa %0, %%xmm3" : : "m"(*data));
                break;
        case 4:
                asm("movdqa %0, %%xmm4" : : "m"(*data));
                break;
        case 5:
                asm("movdqa %0, %%xmm5" : : "m"(*data));
                break;
        case 6:
                asm("movdqa %0, %%xmm6" : : "m"(*data));
                break;
        case 7:
                asm("movdqa %0, %%xmm7" : : "m"(*data));
                break;
        default:
                BUG();
        }
}
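
/*
 * Round-trip sketch (illustrative; GUEST_ASSERT() is the selftests guest-side
 * assertion): write an SSE register and read it back via the accessors above.
 *
 *	sse128_t in = { 0x1, 0x2, 0x3, 0x4 }, out;
 *
 *	write_sse_reg(0, &in);
 *	read_sse_reg(0, &out);
 *	GUEST_ASSERT(sse128_lo(in) == sse128_lo(out) &&
 *		     sse128_hi(in) == sse128_hi(out));
 */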

static inline void cpu_relax(void)
{
        asm volatile("rep; nop" ::: "memory");
}

#define ud2()                   \
        __asm__ __volatile__(   \
                "ud2\n" \
                )

#define hlt()                   \
        __asm__ __volatile__(   \
                "hlt\n" \
                )

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
                                 struct kvm_msrs *msrs)
{
        int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

        TEST_ASSERT(r == msrs->nmsrs,
                    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
                    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
        int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

        TEST_ASSERT(r == msrs->nmsrs,
                    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
                    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}
static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
                                      struct kvm_debugregs *debugregs)
{
        vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}
static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
                                      struct kvm_debugregs *debugregs)
{
        vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}
static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
                                  struct kvm_xsave *xsave)
{
        vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}
static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
                                   struct kvm_xsave *xsave)
{
        vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}
static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
                                  struct kvm_xsave *xsave)
{
        vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}
static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
                                 struct kvm_xcrs *xcrs)
{
        vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}
static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
        vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
                                               uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);

static inline uint32_t kvm_cpu_fms(void)
{
        return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
        return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
        return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
                   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
        return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
                            struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
        return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
        uint32_t max_leaf;

        switch (property.function & 0xc0000000) {
        case 0:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
                break;
        case 0x40000000:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
                break;
        case 0x80000000:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
                break;
        case 0xc0000000:
                max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
        }
        return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
        uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

        return nr_bits > feature.anti_feature.bit &&
               !kvm_cpu_has(feature.anti_feature);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
        return sizeof(struct kvm_cpuid2) +
               sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a struct kvm_cpuid2 instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
        struct kvm_cpuid2 *cpuid;

        cpuid = malloc(kvm_cpuid2_size(nr_entries));
        TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

        cpuid->nent = nr_entries;

        return cpuid;
}
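
/*
 * Usage sketch (illustrative; MAX_NR_CPUID_ENTRIES is a hypothetical size):
 *
 *	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
 *
 *	cpuid->entries[0].function = ...;
 *	vcpu_init_cpuid(vcpu, cpuid);
 *	free(cpuid);
 */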

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
                                                              uint32_t function,
                                                              uint32_t index)
{
        return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
                                                          function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
                                                            uint32_t function)
{
        return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
        int r;

        TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
        r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
        if (r)
                return r;

        /* On success, refresh the cache to pick up adjustments made by KVM. */
        vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
        return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
        TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
        vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

        /* Refresh the cache to pick up adjustments made by KVM. */
        vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
                                     struct kvm_x86_cpu_feature feature,
                                     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
                                          struct kvm_x86_cpu_feature feature)
{
        vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
                                            struct kvm_x86_cpu_feature feature)
{
        vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
                                uint64_t msr_value)
{
        int r = _vcpu_set_msr(vcpu, msr_index, msr_value);

        TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);

struct ex_regs {
        uint64_t rax, rcx, rdx, rbx;
        uint64_t rbp, rsi, rdi;
        uint64_t r8, r9, r10, r11;
        uint64_t r12, r13, r14, r15;
        uint64_t vector;
        uint64_t error_code;
        uint64_t rip;
        uint64_t cs;
        uint64_t rflags;
};

struct idt_entry {
        uint16_t offset0;
        uint16_t selector;
        uint16_t ist : 3;
        uint16_t : 5;
        uint16_t type : 4;
        uint16_t : 1;
        uint16_t dpl : 2;
        uint16_t p : 1;
        uint16_t offset1;
        uint32_t offset2;
        uint32_t reserved;
};

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
                        void (*handler)(struct ex_regs *));

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting
 * SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define KVM_ASM_SAFE(insn)                                      \
        "mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"   \
        "lea 1f(%%rip), %%r10\n\t"                              \
        "lea 2f(%%rip), %%r11\n\t"                              \
        "1: " insn "\n\t"                                       \
        "xor %%r9, %%r9\n\t"                                    \
        "2:\n\t"                                                \
        "mov  %%r9b, %[vector]\n\t"                             \
        "mov  %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE_OUTPUTS(v, ec)     [vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS   "r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)                                   \
({                                                                      \
        uint64_t ign_error_code;                                        \
        uint8_t vector;                                                 \
                                                                        \
        asm volatile(KVM_ASM_SAFE(insn)                                 \
                     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)     \
                     : inputs                                           \
                     : KVM_ASM_SAFE_CLOBBERS);                          \
        vector;                                                         \
})

#define kvm_asm_safe_ec(insn, error_code, inputs...)                    \
({                                                                      \
        uint8_t vector;                                                 \
                                                                        \
        asm volatile(KVM_ASM_SAFE(insn)                                 \
                     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)         \
                     : inputs                                           \
                     : KVM_ASM_SAFE_CLOBBERS);                          \
        vector;                                                         \
})

static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
{
        uint64_t error_code;
        uint8_t vector;
        uint32_t a, d;

        asm volatile(KVM_ASM_SAFE("rdmsr")
                     : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code)
                     : "c"(msr)
                     : KVM_ASM_SAFE_CLOBBERS);

        *val = (uint64_t)a | ((uint64_t)d << 32);
        return vector;
}

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
        return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
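
/*
 * Guest usage sketch (illustrative; 0xdeadbeef is an arbitrary index that is
 * expected to be unimplemented): writing a non-existent MSR should #GP, i.e.
 * yield vector 13, instead of killing the guest.
 *
 *	uint8_t vector = wrmsr_safe(0xdeadbeef, 0);
 *
 *	GUEST_ASSERT(vector == 13);
 */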

bool kvm_is_tdp_enabled(void);

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
                                    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t a3);

void __vm_xsave_require_permission(int bit, const char *name);

#define vm_xsave_require_permission(perm)       \
        __vm_xsave_require_permission(perm, #perm)

enum pg_level {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
        PG_LEVEL_512G,
        PG_LEVEL_NUM
};

#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
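
/*
 * E.g. PG_LEVEL_SHIFT(PG_LEVEL_2M) = (2 - 1) * 9 + 12 = 21, so
 * PG_SIZE_2M = 1ull << 21 = 0x200000 (2 MiB); each level up the paging
 * hierarchy multiplies the page size by 512 (2^9).
 */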

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                    uint64_t nr_bytes, int level);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE          (1UL<<0) /* Protection Enable */
#define X86_CR0_MP          (1UL<<1) /* Monitor Coprocessor */
#define X86_CR0_EM          (1UL<<2) /* Emulation */
#define X86_CR0_TS          (1UL<<3) /* Task Switched */
#define X86_CR0_ET          (1UL<<4) /* Extension Type */
#define X86_CR0_NE          (1UL<<5) /* Numeric Error */
#define X86_CR0_WP          (1UL<<16) /* Write Protect */
#define X86_CR0_AM          (1UL<<18) /* Alignment Mask */
#define X86_CR0_NW          (1UL<<29) /* Not Write-through */
#define X86_CR0_CD          (1UL<<30) /* Cache Disable */
#define X86_CR0_PG          (1UL<<31) /* Paging */

#define XSTATE_XTILE_CFG_BIT            17
#define XSTATE_XTILE_DATA_BIT           18

#define XSTATE_XTILE_CFG_MASK           (1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK          (1ULL << XSTATE_XTILE_DATA_BIT)
#define XFEATURE_XTILE_MASK             (XSTATE_XTILE_CFG_MASK | \
                                        XSTATE_XTILE_DATA_MASK)

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK      BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK        BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK         BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK         BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK        BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK           BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK          BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK  BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK   BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS   BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

#endif /* SELFTEST_KVM_PROCESSOR_H */