/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>
#include <syscall.h>

#include <asm/msr-index.h>
#include <asm/prctl.h>

#include <linux/kvm_para.h>
#include <linux/stringify.h>

#include "../kvm_util.h"

extern bool host_cpu_is_intel;
extern bool host_cpu_is_amd;

#define NMI_VECTOR		0x02

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_LA57		(1ul << 12)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

struct xstate_header {
	u64				xstate_bv;
	u64				xcomp_bv;
	u64				reserved[6];
} __attribute__((packed));

struct xstate {
	u8				i387[512];
	struct xstate_header		header;
	u8				extended_state_area[0];
} __attribute__ ((packed, aligned (64)));

#define XFEATURE_MASK_FP		BIT_ULL(0)
#define XFEATURE_MASK_SSE		BIT_ULL(1)
#define XFEATURE_MASK_YMM		BIT_ULL(2)
#define XFEATURE_MASK_BNDREGS		BIT_ULL(3)
#define XFEATURE_MASK_BNDCSR		BIT_ULL(4)
#define XFEATURE_MASK_OPMASK		BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256		BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM		BIT_ULL(7)
#define XFEATURE_MASK_PT		BIT_ULL(8)
#define XFEATURE_MASK_PKRU		BIT_ULL(9)
#define XFEATURE_MASK_PASID		BIT_ULL(10)
#define XFEATURE_MASK_CET_USER		BIT_ULL(11)
#define XFEATURE_MASK_CET_KERNEL	BIT_ULL(12)
#define XFEATURE_MASK_LBR		BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG		BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA	BIT_ULL(18)

#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK | \
					 XFEATURE_MASK_ZMM_Hi256 | \
					 XFEATURE_MASK_Hi16_ZMM)
#define XFEATURE_MASK_XTILE		(XFEATURE_MASK_XTILE_DATA | \
					 XFEATURE_MASK_XTILE_CFG)

/* Note, these are ordered alphabetically to match kvm_cpuid_entry2. Eww. */
enum cpuid_output_regs {
	KVM_CPUID_EAX,
	KVM_CPUID_EBX,
	KVM_CPUID_ECX,
	KVM_CPUID_EDX
};

/*
 * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
 * passed by value with no overhead.
 */
struct kvm_x86_cpu_feature {
	u32	function;
	u16	index;
	u8	reg;
	u8	bit;
};

#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit)				\
({										\
	struct kvm_x86_cpu_feature feature = {					\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.bit = __bit,							\
	};									\
										\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(feature.index) * BITS_PER_BYTE));	\
	feature;								\
})

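/*
 * Illustrative usage (not from this header): a feature literal is a value and
 * can be queried directly, e.g. via this_cpu_has() in guest code or
 * kvm_cpu_has() in host code, both defined later in this header.
 *
 *	if (this_cpu_has(X86_FEATURE_XSAVE))
 *		set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 *
 *	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
 */
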
/*
 * Basic Leaves, a.k.a. Intel defined
 */
#define X86_FEATURE_MWAIT		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
#define X86_FEATURE_VMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
#define X86_FEATURE_SMX			KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
#define X86_FEATURE_PDCM		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
#define X86_FEATURE_PCID		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
#define X86_FEATURE_X2APIC		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
#define X86_FEATURE_MOVBE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
#define X86_FEATURE_TSC_DEADLINE_TIMER	KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
#define X86_FEATURE_XSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
#define X86_FEATURE_OSXSAVE		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
#define X86_FEATURE_RDRAND		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
#define X86_FEATURE_HYPERVISOR		KVM_X86_CPU_FEATURE(0x1, 0, ECX, 31)
#define X86_FEATURE_PAE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 6)
#define X86_FEATURE_MCE			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
#define X86_FEATURE_APIC		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
#define X86_FEATURE_CLFLUSH		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
#define X86_FEATURE_XMM			KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
#define X86_FEATURE_XMM2		KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
#define X86_FEATURE_FSGSBASE		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
#define X86_FEATURE_TSC_ADJUST		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
#define X86_FEATURE_SGX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 2)
#define X86_FEATURE_HLE			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
#define X86_FEATURE_SMEP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
#define X86_FEATURE_INVPCID		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
#define X86_FEATURE_RTM			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
#define X86_FEATURE_MPX			KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
#define X86_FEATURE_SMAP		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
#define X86_FEATURE_PCOMMIT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
#define X86_FEATURE_CLFLUSHOPT		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
#define X86_FEATURE_CLWB		KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
#define X86_FEATURE_OSPKE		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
#define X86_FEATURE_SHSTK		KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
#define X86_FEATURE_IBT			KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
#define X86_FEATURE_AMX_TILE		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
#define X86_FEATURE_SPEC_CTRL		KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
#define X86_FEATURE_ARCH_CAPABILITIES	KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
#define X86_FEATURE_PKS			KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
#define X86_FEATURE_XTILECFG		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
#define X86_FEATURE_XTILEDATA		KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
#define X86_FEATURE_XSAVES		KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
#define X86_FEATURE_XFD			KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
#define X86_FEATURE_XTILEDATA_XFD	KVM_X86_CPU_FEATURE(0xD, 18, ECX, 2)

/*
 * Extended Leaves, a.k.a. AMD defined
 */
#define X86_FEATURE_SVM			KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
#define X86_FEATURE_NX			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP		KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
#define X86_FEATURE_LM			KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
#define X86_FEATURE_INVTSC		KVM_X86_CPU_FEATURE(0x80000007, 0, EDX, 8)
#define X86_FEATURE_RDPRU		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
#define X86_FEATURE_AMD_IBPB		KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
#define X86_FEATURE_NPT			KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
#define X86_FEATURE_LBRV		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
#define X86_FEATURE_NRIPS		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
#define X86_FEATURE_TSCRATEMSR		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
#define X86_FEATURE_PAUSEFILTER		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF		KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
#define X86_FEATURE_SEV			KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES		KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)

/*
 * KVM defined paravirt features.
 */
#define X86_FEATURE_KVM_CLOCKSOURCE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
#define X86_FEATURE_KVM_NOP_IO_DELAY	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
#define X86_FEATURE_KVM_MMU_OP		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
#define X86_FEATURE_KVM_CLOCKSOURCE2	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
#define X86_FEATURE_KVM_ASYNC_PF	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
#define X86_FEATURE_KVM_STEAL_TIME	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
#define X86_FEATURE_KVM_PV_EOI		KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
#define X86_FEATURE_KVM_PV_UNHALT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
/* Bit 8 apparently isn't used?!?! */
#define X86_FEATURE_KVM_PV_TLB_FLUSH	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
#define X86_FEATURE_KVM_PV_SEND_IPI	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
#define X86_FEATURE_KVM_POLL_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
#define X86_FEATURE_KVM_PV_SCHED_YIELD	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
#define X86_FEATURE_KVM_ASYNC_PF_INT	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
#define X86_FEATURE_KVM_MSI_EXT_DEST_ID	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
#define X86_FEATURE_KVM_MIGRATION_CONTROL	KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)

/*
 * Same idea as X86_FEATURE_XXX, but X86_PROPERTY_XXX retrieves a multi-bit
 * value/property as opposed to a single-bit feature.  Again, pack the info
 * into a 64-bit value to pass by value with no overhead.
 */
struct kvm_x86_cpu_property {
	u32	function;
	u8	index;
	u8	reg;
	u8	lo_bit;
	u8	hi_bit;
};

#define KVM_X86_CPU_PROPERTY(fn, idx, gpr, low_bit, high_bit)			\
({										\
	struct kvm_x86_cpu_property property = {				\
		.function = fn,							\
		.index = idx,							\
		.reg = KVM_CPUID_##gpr,						\
		.lo_bit = low_bit,						\
		.hi_bit = high_bit,						\
	};									\
										\
	kvm_static_assert(low_bit < high_bit);					\
	kvm_static_assert((fn & 0xc0000000) == 0 ||				\
			  (fn & 0xc0000000) == 0x40000000 ||			\
			  (fn & 0xc0000000) == 0x80000000 ||			\
			  (fn & 0xc0000000) == 0xc0000000);			\
	kvm_static_assert(idx < BIT(sizeof(property.index) * BITS_PER_BYTE));	\
	property;								\
})

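/*
 * Illustrative usage (not from this header): properties are read with
 * this_cpu_property() (guest) or kvm_cpu_property() (host), both defined
 * later, e.g. to compute the guest's maximum physical address:
 *
 *	uint32_t pa_bits = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
 *	uint64_t max_gpa = 1ull << pa_bits;
 */
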
#define X86_PROPERTY_MAX_BASIC_LEAF		KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH	KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
#define X86_PROPERTY_PMU_EVENTS_MASK		KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK	KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)

#define X86_PROPERTY_SUPPORTED_XCR0_LO		KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0	KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE		KVM_X86_CPU_PROPERTY(0xd, 0, ECX, 0, 31)
#define X86_PROPERTY_SUPPORTED_XCR0_HI		KVM_X86_CPU_PROPERTY(0xd, 0, EDX, 0, 31)

#define X86_PROPERTY_XSTATE_TILE_SIZE		KVM_X86_CPU_PROPERTY(0xd, 18, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_TILE_OFFSET		KVM_X86_CPU_PROPERTY(0xd, 18, EBX, 0, 31)
#define X86_PROPERTY_AMX_MAX_PALETTE_TABLES	KVM_X86_CPU_PROPERTY(0x1d, 0, EAX, 0, 31)
#define X86_PROPERTY_AMX_TOTAL_TILE_BYTES	KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 0, 15)
#define X86_PROPERTY_AMX_BYTES_PER_TILE		KVM_X86_CPU_PROPERTY(0x1d, 1, EAX, 16, 31)
#define X86_PROPERTY_AMX_BYTES_PER_ROW		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 0, 15)
#define X86_PROPERTY_AMX_NR_TILE_REGS		KVM_X86_CPU_PROPERTY(0x1d, 1, EBX, 16, 31)
#define X86_PROPERTY_AMX_MAX_ROWS		KVM_X86_CPU_PROPERTY(0x1d, 1, ECX, 0, 15)

#define X86_PROPERTY_MAX_KVM_LEAF		KVM_X86_CPU_PROPERTY(0x40000000, 0, EAX, 0, 31)

#define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
#define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
#define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)

#define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)

/*
 * Intel's architectural PMU events are bizarre.  They have a "feature" bit
 * that indicates the feature is _not_ supported, and a property that states
 * the length of the bit mask of unsupported features.  A feature is supported
 * if the size of the bit mask is larger than the "unavailable" bit, and said
 * bit is not set.
 *
 * Wrap the "unavailable" feature to simplify checking whether or not a given
 * architectural event is supported.
 */
struct kvm_x86_pmu_feature {
	struct kvm_x86_cpu_feature anti_feature;
};

#define KVM_X86_PMU_FEATURE(name, __bit)				\
({									\
	struct kvm_x86_pmu_feature feature = {				\
		.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \
	};								\
	feature;							\
})

#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED	KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)

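/*
 * Illustrative usage (not from this header): the double negative is hidden
 * behind this_pmu_has()/kvm_pmu_has(), defined later, which check both the
 * bit vector length and that the "unavailable" bit is clear:
 *
 *	TEST_REQUIRE(kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED));
 */
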
static inline unsigned int x86_family(unsigned int eax)
{
	unsigned int x86;

	x86 = (eax >> 8) & 0xf;

	/* The extended family field only kicks in for family 0xf. */
	if (x86 == 0xf)
		x86 += (eax >> 20) & 0xff;

	return x86;
}

static inline unsigned int x86_model(unsigned int eax)
{
	return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}

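/*
 * Example (illustrative): for CPUID.01H:EAX = 0x000806ea (a Kaby Lake FMS),
 * x86_family() folds in the extended family only when the base family is 0xf,
 * and x86_model() splices the extended model above the base model:
 *
 *	x86_family(0x000806ea) == 0x6
 *	x86_model(0x000806ea)  == 0x8e
 */
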
/* Page table bitfield declarations */
#define PTE_PRESENT_MASK	BIT_ULL(0)
#define PTE_WRITABLE_MASK	BIT_ULL(1)
#define PTE_USER_MASK		BIT_ULL(2)
#define PTE_ACCESSED_MASK	BIT_ULL(5)
#define PTE_DIRTY_MASK		BIT_ULL(6)
#define PTE_LARGE_MASK		BIT_ULL(7)
#define PTE_GLOBAL_MASK		BIT_ULL(8)
#define PTE_NX_MASK		BIT_ULL(63)

#define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ULL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1) & PHYSICAL_PAGE_MASK)

#define HUGEPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define HUGEPAGE_SIZE(x)	(1UL << HUGEPAGE_SHIFT(x))
#define HUGEPAGE_MASK(x)	(~(HUGEPAGE_SIZE(x) - 1) & PHYSICAL_PAGE_MASK)

#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)

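/*
 * Example (illustrative): the HUGEPAGE_* helpers take the paging level as
 * input, e.g. level 2 gives HUGEPAGE_SHIFT(2) == 21, i.e. 2MiB pages.
 * Combined with PTE_GET_PFN(), a huge PTE can be decomposed as:
 *
 *	uint64_t pfn  = PTE_GET_PFN(pte);
 *	uint64_t base = pte & HUGEPAGE_MASK(2);
 */
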
/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

struct kvm_x86_state {
	struct kvm_xsave *xsave;
	struct kvm_vcpu_events events;
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	struct kvm_xcrs xcrs;
	struct kvm_sregs sregs;
	struct kvm_debugregs debugregs;
	union {
		struct kvm_nested_state nested;
		char nested_[16384];
	};
	struct kvm_msrs msrs;
};

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;
	uint64_t tsc_val;
	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.  If software requires RDTSC to be
	 * executed prior to execution of any subsequent instruction, it can
	 * execute LFENCE immediately after RDTSC.
	 */
	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
	tsc_val = ((uint64_t)edx) << 32 | eax;
	return tsc_val;
}

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}

static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
		: /* output */ "=a" (tmp)
		: /* input */ "d" (port));

	return tmp;
}

static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	__asm__ __volatile__("xgetbv;"
			     : "=a" (eax), "=d" (edx)
			     : "c" (index));
	return eax | ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}

static inline void wrpkru(u32 pkru)
{
	/* Note, ECX and EDX are architecturally required to be '0'. */
	asm volatile(".byte 0x0f,0x01,0xef\n\t"
		     : : "a" (pkru), "c"(0), "d"(0));
}

static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

static inline void outl(uint16_t port, uint32_t value)
{
	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}

static inline void __cpuid(uint32_t function, uint32_t index,
			   uint32_t *eax, uint32_t *ebx,
			   uint32_t *ecx, uint32_t *edx)
{
	*eax = function;
	*ecx = index;

	asm volatile("cpuid"
	    : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void cpuid(uint32_t function,
			 uint32_t *eax, uint32_t *ebx,
			 uint32_t *ecx, uint32_t *edx)
{
	return __cpuid(function, 0, eax, ebx, ecx, edx);
}

static inline uint32_t this_cpu_fms(void)
{
	uint32_t eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	return eax;
}

static inline uint32_t this_cpu_family(void)
{
	return x86_family(this_cpu_fms());
}

static inline uint32_t this_cpu_model(void)
{
	return x86_model(this_cpu_fms());
}

static inline bool this_cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	uint32_t eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

static inline bool this_cpu_is_intel(void)
{
	return this_cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
static inline bool this_cpu_is_amd(void)
{
	return this_cpu_vendor_string_is("AuthenticAMD");
}

static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
				      uint8_t reg, uint8_t lo, uint8_t hi)
{
	uint32_t gprs[4];

	__cpuid(function, index,
		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
		&gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);

	return (gprs[reg] & GENMASK(hi, lo)) >> lo;
}

static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return __this_cpu_has(feature.function, feature.index,
			      feature.reg, feature.bit, feature.bit);
}

static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
{
	return __this_cpu_has(property.function, property.index,
			      property.reg, property.lo_bit, property.hi_bit);
}

static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = this_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

	return nr_bits > feature.anti_feature.bit &&
	       !this_cpu_has(feature.anti_feature);
}

static __always_inline uint64_t this_cpu_supported_xcr0(void)
{
	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

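/*
 * Illustrative guest-side usage (not from this header): enable XSAVE in CR4
 * and then set XCR0 to everything the CPU reports as supported.
 *
 *	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 *	xsetbv(0, this_cpu_supported_xcr0());
 */
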
typedef u32		__attribute__((vector_size(16))) sse128_t;
#define __sse128_u	union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x)	({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x)	({ __sse128_u t; t.vec = x; t.as_u64[1]; })

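/*
 * Example (illustrative): sse128_lo()/sse128_hi() split a 128-bit vector into
 * its two 64-bit halves via a union, e.g. to compare an XMM register that was
 * read back against the value that was written:
 *
 *	sse128_t data;
 *
 *	read_sse_reg(0, &data);
 *	GUEST_ASSERT(sse128_lo(data) == expected_lo &&
 *		     sse128_hi(data) == expected_hi);
 */
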
static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void write_sse_reg(int reg, const sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %0, %%xmm0" : : "m"(*data));
		break;
	case 1:
		asm("movdqa %0, %%xmm1" : : "m"(*data));
		break;
	case 2:
		asm("movdqa %0, %%xmm2" : : "m"(*data));
		break;
	case 3:
		asm("movdqa %0, %%xmm3" : : "m"(*data));
		break;
	case 4:
		asm("movdqa %0, %%xmm4" : : "m"(*data));
		break;
	case 5:
		asm("movdqa %0, %%xmm5" : : "m"(*data));
		break;
	case 6:
		asm("movdqa %0, %%xmm6" : : "m"(*data));
		break;
	case 7:
		asm("movdqa %0, %%xmm7" : : "m"(*data));
		break;
	default:
		BUG();
	}
}

static inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"		\
		)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"		\
		)

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);

const struct kvm_msr_list *kvm_get_msr_index_list(void);
const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);

static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}

static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
{
	int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);

	TEST_ASSERT(r == msrs->nmsrs,
		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
		    r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
}

static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
}

static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
				      struct kvm_debugregs *debugregs)
{
	vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
}

static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
}

static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
				   struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
}

static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
				  struct kvm_xsave *xsave)
{
	vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
}

static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
				 struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
}

static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
{
	vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index);
const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);

static inline uint32_t kvm_cpu_fms(void)
{
	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
}

static inline uint32_t kvm_cpu_family(void)
{
	return x86_family(kvm_cpu_fms());
}

static inline uint32_t kvm_cpu_model(void)
{
	return x86_model(kvm_cpu_fms());
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature);

static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
			    struct kvm_x86_cpu_property property);

static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
{
	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
}

static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
{
	uint32_t max_leaf;

	switch (property.function & 0xc0000000) {
	case 0:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_BASIC_LEAF);
		break;
	case 0x40000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_KVM_LEAF);
		break;
	case 0x80000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_EXT_LEAF);
		break;
	case 0xc0000000:
		max_leaf = kvm_cpu_property(X86_PROPERTY_MAX_CENTAUR_LEAF);
	}
	return max_leaf >= property.function;
}

static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
{
	uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);

	return nr_bits > feature.anti_feature.bit &&
	       !kvm_cpu_has(feature.anti_feature);
}

static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
{
	if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
		return 0;

	return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
	       ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
}

static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

/*
 * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
 * entries sized to hold @nr_entries.  The caller is responsible for freeing
 * the struct.
 */
static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid;

	cpuid = malloc(kvm_cpuid2_size(nr_entries));
	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");

	cpuid->nent = nr_entries;

	return cpuid;
}

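/*
 * Illustrative usage (not from this header), with @nr_entries a test-chosen
 * count: the returned struct is a plain malloc()ed buffer, so pair every
 * allocation with free().
 *
 *	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(nr_entries);
 *
 *	vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
 *	free(cpuid);
 */
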
void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);

static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							       uint32_t function,
							       uint32_t index)
{
	return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
							  function, index);
}

static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
							     uint32_t function)
{
	return __vcpu_get_cpuid_entry(vcpu, function, 0);
}

static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	int r;

	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
	r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
	if (r)
		return r;

	/* On success, refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
	return 0;
}

static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");

	vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);

	/* Refresh the cache to pick up adjustments made by KVM. */
	vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
}

void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set);

static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
					  struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
}

static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
					    struct kvm_x86_cpu_feature feature)
{
	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
}

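/*
 * Illustrative usage (not from this header): toggle a feature bit in the
 * vCPU's cached CPUID and push the result to KVM in one call.
 *
 *	vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MPX);
 */
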
uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);

/*
 * Assert on an MSR access(es) and pretty print the MSR name when possible.
 * Note, the caller provides the stringified name so that the name of the macro
 * is printed, not the value the macro resolves to (due to macro expansion).
 */
#define TEST_ASSERT_MSR(cond, fmt, msr, str, args...)			\
do {									\
	if (__builtin_constant_p(msr)) {				\
		TEST_ASSERT(cond, fmt, str, args);			\
	} else if (!(cond)) {						\
		char buf[16];						\
									\
		snprintf(buf, sizeof(buf), "MSR 0x%x", msr);		\
		TEST_ASSERT(cond, fmt, buf, args);			\
	}								\
} while (0)

/*
 * Returns true if KVM should return the last written value when reading an MSR
 * from userspace, e.g. the MSR isn't a command MSR, doesn't emulate state that
 * is changing, etc.  This is NOT an exhaustive list!  The intent is to filter
 * out MSRs that are not durable _and_ that a selftest wants to write.
 */
static inline bool is_durable_msr(uint32_t msr)
{
	return msr != MSR_IA32_TSC;
}

#define vcpu_set_msr(vcpu, msr, val)							\
do {											\
	uint64_t r, v = val;								\
											\
	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,				\
			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v);	\
	if (!is_durable_msr(msr))							\
		break;									\
	r = vcpu_get_msr(vcpu, msr);							\
	TEST_ASSERT_MSR(r == v, "Set %s to '0x%lx', got back '0x%lx'", msr, #msr, v, r);\
} while (0)

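/*
 * Illustrative usage (not from this header): because vcpu_set_msr() is a
 * macro, the MSR's name, not its raw index, shows up in assertion messages,
 * and the write is automatically read back and verified for durable MSRs.
 *
 *	vcpu_set_msr(vcpu, MSR_IA32_MISC_ENABLE, 0);
 */
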
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
bool vm_is_unrestricted_guest(struct kvm_vm *vm);

struct ex_regs {
	uint64_t rax, rcx, rdx, rbx;
	uint64_t rbp, rsi, rdi;
	uint64_t r8, r9, r10, r11;
	uint64_t r12, r13, r14, r15;
	uint64_t vector;
	uint64_t error_code;
	uint64_t rip;
	uint64_t cs;
	uint64_t rflags;
};

struct idt_entry {
	uint16_t offset0;
	uint16_t selector;
	uint16_t ist : 3;
	uint16_t : 5;
	uint16_t type : 4;
	uint16_t : 1;
	uint16_t dpl : 2;
	uint16_t p : 1;
	uint16_t offset1;
	uint32_t offset2; uint32_t reserved;
};

void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
			void (*handler)(struct ex_regs *));

/* If a toddler were to say "abracadabra". */
#define KVM_EXCEPTION_MAGIC 0xabacadabaULL

/*
 * KVM selftest exception fixup uses registers to coordinate with the exception
 * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
 * per-CPU data.  Using only registers avoids having to map memory into the
 * guest, doesn't require a valid, stable GS.base, and reduces the risk of
 * recursive faults when accessing memory in the handler.  The downside to
 * using registers is that it restricts what registers can be used by the
 * actual instruction.  But, selftests are 64-bit only, making register
 * pressure a minor concern.  Use r9-r11 as they are volatile, i.e. don't need
 * to be saved by the callee, and except for r11 are not implicit parameters
 * to any instructions.  Ideally, fixup would use r8-r10 and thus avoid
 * implicit parameters entirely, but Hyper-V's hypercall ABI uses r8 and
 * testing Hyper-V is higher priority than testing non-faulting SYSCALL/SYSRET.
 *
 * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
 * is guaranteed to be non-zero on fault.
 *
 * REGISTER INPUTS:
 * r9  = MAGIC
 * r10 = RIP
 * r11 = new RIP on fault
 *
 * REGISTER OUTPUTS:
 * r9  = exception vector (non-zero)
 * r10 = error code
 */
#define KVM_ASM_SAFE(insn)					\
	"mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"	\
	"lea 1f(%%rip), %%r10\n\t"				\
	"lea 2f(%%rip), %%r11\n\t"				\
	"1: " insn "\n\t"					\
	"xor %%r9, %%r9\n\t"					\
	"2:\n\t"						\
	"mov %%r9b, %[vector]\n\t"				\
	"mov %%r10, %[error_code]\n\t"

#define KVM_ASM_SAFE_OUTPUTS(v, ec)	[vector] "=qm"(v), [error_code] "=rm"(ec)
#define KVM_ASM_SAFE_CLOBBERS		"r9", "r10", "r11"

#define kvm_asm_safe(insn, inputs...)					\
({									\
	uint64_t ign_error_code;					\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

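/*
 * Illustrative usage (not from this header): execute an instruction that is
 * expected to fault and assert on the returned vector, e.g. a WRMSR to a
 * bogus MSR index should take a #GP (vector 13):
 *
 *	uint8_t vector = kvm_asm_safe("wrmsr", "a"(0), "d"(0), "c"(0xdeadbeef));
 *
 *	GUEST_ASSERT(vector == 13);
 */
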
#define kvm_asm_safe_ec(insn, error_code, inputs...)			\
({									\
	uint8_t vector;							\
									\
	asm volatile(KVM_ASM_SAFE(insn)					\
		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
		     : inputs						\
		     : KVM_ASM_SAFE_CLOBBERS);				\
	vector;								\
})

static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
{
	uint64_t error_code;
	uint8_t vector;
	uint32_t a, d;

	asm volatile(KVM_ASM_SAFE("rdmsr")
		     : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code)
		     : "c"(msr)
		     : KVM_ASM_SAFE_CLOBBERS);

	*val = (uint64_t)a | ((uint64_t)d << 32);
	return vector;
}

static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
{
	return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}

static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	return kvm_asm_safe("xsetbv", "a" (eax), "d" (edx), "c" (index));
}

bool kvm_is_tdp_enabled(void);

uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
				    int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3);
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);

static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
						     uint64_t size, uint64_t flags)
{
	return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
}

static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
					       uint64_t flags)
{
	uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);

	GUEST_ASSERT(!ret);
}

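/*
 * Illustrative usage (not from this header): KVM_HC_MAP_GPA_RANGE takes the
 * size in pages, which the wrapper derives from a byte count, plus attribute
 * flags from <linux/kvm_para.h>, e.g. to mark a region decrypted/shared:
 *
 *	kvm_hypercall_map_gpa_range(gpa, PG_SIZE_2M, KVM_MAP_GPA_RANGE_DECRYPTED);
 */
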
void __vm_xsave_require_permission(uint64_t xfeature, const char *name);

#define vm_xsave_require_permission(xfeature)	\
	__vm_xsave_require_permission(xfeature, #xfeature)

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))

#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level);

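/*
 * Illustrative usage (not from this header): back a 1GiB-aligned region with
 * 1GiB pages instead of the default 4KiB mappings.
 *
 *	virt_map_level(vm, gva, gpa, PG_SIZE_1G, PG_LEVEL_1G);
 */
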
/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0) /* Protection Enable */
#define X86_CR0_MP	(1UL<<1) /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2) /* Emulation */
#define X86_CR0_TS	(1UL<<3) /* Task Switched */
#define X86_CR0_ET	(1UL<<4) /* Extension Type */
#define X86_CR0_NE	(1UL<<5) /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

#define PFERR_PRESENT_BIT	0
#define PFERR_WRITE_BIT		1
#define PFERR_USER_BIT		2
#define PFERR_RSVD_BIT		3
#define PFERR_FETCH_BIT		4
#define PFERR_PK_BIT		5
#define PFERR_SGX_BIT		15
#define PFERR_GUEST_FINAL_BIT	32
#define PFERR_GUEST_PAGE_BIT	33
#define PFERR_IMPLICIT_ACCESS_BIT 48

#define PFERR_PRESENT_MASK	BIT(PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK	BIT(PFERR_WRITE_BIT)
#define PFERR_USER_MASK		BIT(PFERR_USER_BIT)
#define PFERR_RSVD_MASK		BIT(PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK	BIT(PFERR_FETCH_BIT)
#define PFERR_PK_MASK		BIT(PFERR_PK_BIT)
#define PFERR_SGX_MASK		BIT(PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK	BIT_ULL(PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK	BIT_ULL(PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS	BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)

#endif /* SELFTEST_KVM_PROCESSOR_H */