KVM: VMX: Add and use X86_CR4_PDPTR_BITS when !enable_ept
[linux-2.6-block.git] / arch/x86/kvm/kvm_cache_regs.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

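/*
 * CR0/CR4 bits that KVM may allow the guest to own, i.e. bits the guest may
 * access without triggering a VM-exit.
 */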
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

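/*
 * CR4 bits whose toggling requires a guest TLB flush, and CR4 bits whose
 * toggling requires the guest PDPTEs to be reloaded when EPT is not in use.
 */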
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

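/*
 * Build kvm_<reg>_read()/kvm_<reg>_write() accessors for the general-purpose
 * registers cached in vcpu->arch.regs.
 */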
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8, R8)
BUILD_KVM_GPR_ACCESSORS(r9, R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

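/*
 * regs_avail/regs_dirty bookkeeping: a register is "available" when its
 * cached value in vcpu->arch.regs is valid, and "dirty" when the cached
 * value has been modified and still needs to be propagated back to
 * hardware state.
 */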
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
						enum kvm_reg reg)
{
	__clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode. In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		static_call(kvm_x86_cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

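/*
 * RIP and RSP go through the raw helpers so that vendor code can cache them
 * lazily (e.g. VMX keeps them in the VMCS).
 */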
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

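/*
 * Read a cached PDPTE of the current walk MMU, filling the cache via the
 * vendor's cache_reg() hook if the PDPTRs aren't already available.
 */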
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

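/*
 * CR0/CR4 readers: if any requested bit may be guest-owned and isn't cached
 * yet, refresh the cache from hardware before applying the mask.
 */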
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

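/* Combine EDX:EAX into a single u64, e.g. for RDMSR/RDTSC-style results. */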
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

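/*
 * Nested virtualization: HF_GUEST_MASK in vcpu->arch.hflags tracks whether
 * the vCPU is currently running its nested (L2) guest.
 */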
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

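/* True if the vCPU is in System Management Mode. */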
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif