/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/processor.h
 *
 * Copyright (C) 2018, Google LLC.
 */

#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <assert.h>
#include <stdint.h>

#include <asm/msr-index.h>

#define X86_EFLAGS_FIXED	(1u << 1)

#define X86_CR4_VME		(1ul << 0)
#define X86_CR4_PVI		(1ul << 1)
#define X86_CR4_TSD		(1ul << 2)
#define X86_CR4_DE		(1ul << 3)
#define X86_CR4_PSE		(1ul << 4)
#define X86_CR4_PAE		(1ul << 5)
#define X86_CR4_MCE		(1ul << 6)
#define X86_CR4_PGE		(1ul << 7)
#define X86_CR4_PCE		(1ul << 8)
#define X86_CR4_OSFXSR		(1ul << 9)
#define X86_CR4_OSXMMEXCPT	(1ul << 10)
#define X86_CR4_UMIP		(1ul << 11)
#define X86_CR4_VMXE		(1ul << 13)
#define X86_CR4_SMXE		(1ul << 14)
#define X86_CR4_FSGSBASE	(1ul << 16)
#define X86_CR4_PCIDE		(1ul << 17)
#define X86_CR4_OSXSAVE		(1ul << 18)
#define X86_CR4_SMEP		(1ul << 20)
#define X86_CR4_SMAP		(1ul << 21)
#define X86_CR4_PKE		(1ul << 22)

/* General Registers in 64-Bit Mode */
struct gpr64_regs {
	u64 rax;
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 rsp;
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
};

struct desc64 {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1:8, type:4, s:1, dpl:2, p:1;
	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
	uint32_t base3;
	uint32_t zero1;
} __attribute__((packed));

struct desc_ptr {
	uint16_t size;
	uint64_t address;
} __attribute__((packed));

static inline uint64_t get_desc64_base(const struct desc64 *desc)
{
	return ((uint64_t)desc->base3 << 32) |
	       (desc->base0 | ((desc->base1) << 16) | ((uint32_t)desc->base2 << 24));
}

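/*
 * Usage sketch (illustrative, not part of the original header): recover the
 * base of the current TSS descriptor. Assumes a 64-bit TSS, whose 16-byte
 * descriptor matches struct desc64; get_gdt()/get_tr() are defined below.
 *
 *	struct desc_ptr gdt = get_gdt();
 *	struct desc64 *tss_desc =
 *		(struct desc64 *)(gdt.address + (get_tr() & ~7));
 *	uint64_t tss_base = get_desc64_base(tss_desc);
 */
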
static inline uint64_t rdtsc(void)
{
	uint32_t eax, edx;

	/*
	 * The lfence is to wait (on Intel CPUs) until all previous
	 * instructions have been executed.
	 */
	__asm__ __volatile__("lfence; rdtsc" : "=a"(eax), "=d"(edx));
	return ((uint64_t)edx) << 32 | eax;
}

static inline uint64_t rdtscp(uint32_t *aux)
{
	uint32_t eax, edx;

	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
	return ((uint64_t)edx) << 32 | eax;
}

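/*
 * Usage sketch (illustrative): a rough cycle count around a code region.
 * do_work() is a hypothetical function; the aux value filled by rdtscp()
 * holds IA32_TSC_AUX, which hosts typically program with the CPU number.
 *
 *	uint32_t aux;
 *	uint64_t start = rdtsc();
 *	do_work();
 *	uint64_t cycles = rdtscp(&aux) - start;
 */
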
static inline uint64_t rdmsr(uint32_t msr)
{
	uint32_t a, d;

	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}

static inline void wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
}

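/*
 * Usage sketch (illustrative): read-modify-write of IA32_APICBASE to enable
 * x2APIC mode from guest code. MSR_IA32_APICBASE comes from
 * <asm/msr-index.h>; X2APIC_ENABLE is defined later in this header.
 *
 *	uint64_t apic_base = rdmsr(MSR_IA32_APICBASE);
 *	wrmsr(MSR_IA32_APICBASE, apic_base | X2APIC_ENABLE);
 */
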
static inline uint16_t inw(uint16_t port)
{
	uint16_t tmp;

	__asm__ __volatile__("in %%dx, %%ax"
		: /* output */ "=a" (tmp)
		: /* input */ "d" (port));

	return tmp;
}

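/*
 * Usage sketch (illustrative): read a 16-bit value from an I/O port; the
 * port number here is hypothetical and depends on the device under test.
 *
 *	uint16_t data = inw(0xd000);
 */
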
static inline uint16_t get_es(void)
{
	uint16_t es;

	__asm__ __volatile__("mov %%es, %[es]"
			     : /* output */ [es]"=rm"(es));
	return es;
}

static inline uint16_t get_cs(void)
{
	uint16_t cs;

	__asm__ __volatile__("mov %%cs, %[cs]"
			     : /* output */ [cs]"=rm"(cs));
	return cs;
}

static inline uint16_t get_ss(void)
{
	uint16_t ss;

	__asm__ __volatile__("mov %%ss, %[ss]"
			     : /* output */ [ss]"=rm"(ss));
	return ss;
}

static inline uint16_t get_ds(void)
{
	uint16_t ds;

	__asm__ __volatile__("mov %%ds, %[ds]"
			     : /* output */ [ds]"=rm"(ds));
	return ds;
}

static inline uint16_t get_fs(void)
{
	uint16_t fs;

	__asm__ __volatile__("mov %%fs, %[fs]"
			     : /* output */ [fs]"=rm"(fs));
	return fs;
}

static inline uint16_t get_gs(void)
{
	uint16_t gs;

	__asm__ __volatile__("mov %%gs, %[gs]"
			     : /* output */ [gs]"=rm"(gs));
	return gs;
}

static inline uint16_t get_tr(void)
{
	uint16_t tr;

	__asm__ __volatile__("str %[tr]"
			     : /* output */ [tr]"=rm"(tr));
	return tr;
}

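/*
 * Usage sketch (illustrative): the low two bits of the CS selector hold the
 * current privilege level, so guest code can assert it runs in ring 0.
 *
 *	assert((get_cs() & 3) == 0);
 */
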
static inline uint64_t get_cr0(void)
{
	uint64_t cr0;

	__asm__ __volatile__("mov %%cr0, %[cr0]"
			     : /* output */ [cr0]"=r"(cr0));
	return cr0;
}

static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]"
			     : /* output */ [cr3]"=r"(cr3));
	return cr3;
}

static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]"
			     : /* output */ [cr4]"=r"(cr4));
	return cr4;
}

static inline void set_cr4(uint64_t val)
{
	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}

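/*
 * Usage sketch (illustrative): enable a CR4 feature bit with a
 * read-modify-write, here CR4.OSXSAVE so that XGETBV/XSETBV become usable.
 *
 *	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
 */
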
static inline struct desc_ptr get_gdt(void)
{
	struct desc_ptr gdt;
	__asm__ __volatile__("sgdt %[gdt]"
			     : /* output */ [gdt]"=m"(gdt));
	return gdt;
}

static inline struct desc_ptr get_idt(void)
{
	struct desc_ptr idt;
	__asm__ __volatile__("sidt %[idt]"
			     : /* output */ [idt]"=m"(idt));
	return idt;
}

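/*
 * Usage sketch (illustrative): "size" in struct desc_ptr is the limit, i.e.
 * the table size in bytes minus one, and 64-bit IDT gates are 16 bytes, so
 * the number of installed vectors can be derived as below.
 *
 *	struct desc_ptr idt = get_idt();
 *	unsigned int nr_vectors = (idt.size + 1) / 16;
 */
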
#define SET_XMM(__var, __xmm) \
	asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)

static inline void set_xmm(int n, unsigned long val)
{
	switch (n) {
	case 0:
		SET_XMM(val, xmm0);
		break;
	case 1:
		SET_XMM(val, xmm1);
		break;
	case 2:
		SET_XMM(val, xmm2);
		break;
	case 3:
		SET_XMM(val, xmm3);
		break;
	case 4:
		SET_XMM(val, xmm4);
		break;
	case 5:
		SET_XMM(val, xmm5);
		break;
	case 6:
		SET_XMM(val, xmm6);
		break;
	case 7:
		SET_XMM(val, xmm7);
		break;
	}
}

typedef unsigned long v1di __attribute__ ((vector_size (8)));
static inline unsigned long get_xmm(int n)
{
	assert(n >= 0 && n <= 7);

	register v1di xmm0 __asm__("%xmm0");
	register v1di xmm1 __asm__("%xmm1");
	register v1di xmm2 __asm__("%xmm2");
	register v1di xmm3 __asm__("%xmm3");
	register v1di xmm4 __asm__("%xmm4");
	register v1di xmm5 __asm__("%xmm5");
	register v1di xmm6 __asm__("%xmm6");
	register v1di xmm7 __asm__("%xmm7");
	switch (n) {
	case 0:
		return (unsigned long)xmm0;
	case 1:
		return (unsigned long)xmm1;
	case 2:
		return (unsigned long)xmm2;
	case 3:
		return (unsigned long)xmm3;
	case 4:
		return (unsigned long)xmm4;
	case 5:
		return (unsigned long)xmm5;
	case 6:
		return (unsigned long)xmm6;
	case 7:
		return (unsigned long)xmm7;
	}
	return 0;
}

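/*
 * Usage sketch (illustrative): round-trip a value through the low 64 bits
 * of an XMM register. Only works if the compiler does not clobber the
 * register between the two calls.
 *
 *	set_xmm(0, 0xcafe);
 *	assert(get_xmm(0) == 0xcafe);
 */
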
bool is_intel_cpu(void);

struct kvm_x86_state;
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_x86_state *state);

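/*
 * Usage sketch (illustrative): snapshot a vCPU, tear the VM down, then
 * restore the state into a freshly created VM, as migration-style
 * selftests do. VCPU_ID is a hypothetical test-local constant.
 *
 *	struct kvm_x86_state *state = vcpu_save_state(vm, VCPU_ID);
 *	... destroy and recreate vm and its vCPU ...
 *	vcpu_load_state(vm, VCPU_ID, state);
 */
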
struct kvm_msr_list *kvm_get_msr_index_list(void);

struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
		    struct kvm_cpuid2 *cpuid);

struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);

static inline struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_entry(uint32_t function)
{
	return kvm_get_supported_cpuid_index(function, 0);
}

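/*
 * Usage sketch (illustrative): check a host-supported feature bit and hand
 * the full supported CPUID to a vCPU. XSAVE is CPUID.1:ECX bit 26; VCPU_ID
 * is a hypothetical test-local constant.
 *
 *	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 *	bool has_xsave = entry->ecx & (1u << 26);
 *	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
 */
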
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value);
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
		  uint64_t msr_value);

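/*
 * Usage sketch (illustrative): host-side read-modify-write of a guest MSR,
 * here setting EFER.NX. MSR_EFER and EFER_NX come from <asm/msr-index.h>;
 * VCPU_ID is a hypothetical test-local constant.
 *
 *	uint64_t efer = vcpu_get_msr(vm, VCPU_ID, MSR_EFER);
 *	vcpu_set_msr(vm, VCPU_ID, MSR_EFER, efer | EFER_NX);
 */
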
uint32_t kvm_get_cpuid_max_basic(void);
uint32_t kvm_get_cpuid_max_extended(void);
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);

/*
 * Basic CPU control in CR0
 */
#define X86_CR0_PE	(1UL<<0)  /* Protection Enable */
#define X86_CR0_MP	(1UL<<1)  /* Monitor Coprocessor */
#define X86_CR0_EM	(1UL<<2)  /* Emulation */
#define X86_CR0_TS	(1UL<<3)  /* Task Switched */
#define X86_CR0_ET	(1UL<<4)  /* Extension Type */
#define X86_CR0_NE	(1UL<<5)  /* Numeric Error */
#define X86_CR0_WP	(1UL<<16) /* Write Protect */
#define X86_CR0_AM	(1UL<<18) /* Alignment Mask */
#define X86_CR0_NW	(1UL<<29) /* Not Write-through */
#define X86_CR0_CD	(1UL<<30) /* Cache Disable */
#define X86_CR0_PG	(1UL<<31) /* Paging */

#define APIC_BASE_MSR		0x800
#define X2APIC_ENABLE		(1UL << 10)
#define APIC_ICR		0x300
#define APIC_DEST_SELF		0x40000
#define APIC_DEST_ALLINC	0x80000
#define APIC_DEST_ALLBUT	0xC0000
#define APIC_ICR_RR_MASK	0x30000
#define APIC_ICR_RR_INVALID	0x00000
#define APIC_ICR_RR_INPROG	0x10000
#define APIC_ICR_RR_VALID	0x20000
#define APIC_INT_LEVELTRIG	0x08000
#define APIC_INT_ASSERT		0x04000
#define APIC_ICR_BUSY		0x01000
#define APIC_DEST_LOGICAL	0x00800
#define APIC_DEST_PHYSICAL	0x00000
#define APIC_DM_FIXED		0x00000
#define APIC_DM_FIXED_MASK	0x00700
#define APIC_DM_LOWEST		0x00100
#define APIC_DM_SMI		0x00200
#define APIC_DM_REMRD		0x00300
#define APIC_DM_NMI		0x00400
#define APIC_DM_INIT		0x00500
#define APIC_DM_STARTUP		0x00600
#define APIC_DM_EXTINT		0x00700
#define APIC_VECTOR_MASK	0x000FF
#define APIC_ICR2		0x310

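/*
 * Usage sketch (illustrative): with x2APIC enabled, ICR is MSR 0x830, i.e.
 * APIC_BASE_MSR + (APIC_ICR >> 4), and a guest can send itself a fixed-mode
 * self-IPI on a hypothetical "vector" as below.
 *
 *	wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
 *	      APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED | vector);
 */
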
/* VMX_EPT_VPID_CAP bits */
#define VMX_EPT_VPID_CAP_AD_BITS	(1ULL << 21)

#endif /* SELFTEST_KVM_PROCESSOR_H */