KVM: x86/mmu: Rename reset_rsvds_bits_mask()
arch/x86/kvm/mmu.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

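/*
 * For illustration only (not used by the code below): with the values
 * above, PT32_DIR_PSE36_MASK is ((1ULL << 4) - 1) << 13 = 0x1e000, i.e.
 * PDE bits 16:13, which a classic PSE-36 4MB mapping uses to hold
 * guest-physical address bits 35:32.
 */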
#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}

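/*
 * Example of the arithmetic above: rsvd_bits(46, 51) evaluates to
 * ((2ULL << 5) - 1) << 46, i.e. a mask with bits 51:46 set, which is the
 * sort of mask callers typically build for the reserved physical-address
 * bits of a guest whose MAXPHYADDR is 46.
 */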
/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
extern u8 __read_mostly shadow_phys_bits;

static inline gfn_t kvm_mmu_max_gfn(void)
{
	/*
	 * Note that this uses the host MAXPHYADDR, not the guest's.
	 * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
	 * assuming KVM is running on bare metal, guest accesses beyond
	 * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
	 * (either EPT Violation/Misconfig or #NPF), and so KVM will never
	 * install a SPTE for such addresses.  If KVM is running as a VM
	 * itself, on the other hand, it might see a MAXPHYADDR that is less
	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
	 * disallows such SPTEs entirely and simplifies the TDP MMU.
	 */
	int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;

	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}

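/*
 * For example, on a bare-metal host with MAXPHYADDR = 46 and TDP enabled,
 * kvm_mmu_max_gfn() returns (1ULL << (46 - 12)) - 1 = 0x3ffffffff, the
 * highest 4KiB frame number KVM will ever map for the guest.
 */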
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

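/*
 * Illustration: with CR4.PCIDE = 1 the current PCID is simply CR3[11:0],
 * so a CR3 value of 0x12345005 yields PCID 0x005; with CR4.PCIDE = 0 the
 * PCID is always 0.
 */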
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->root_role.level);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
	 * For implicit supervisor accesses, SMAP cannot be overridden.
	 *
	 * SMAP applies only to supervisor accesses; for user accesses, the
	 * value of not_smap has no bearing on the result.
	 *
	 * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
	 * this bit will always be zero in pfec, but it will be one in index
	 * if SMAP checks are being disabled.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for the domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}

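/*
 * Worked example of the index computation above, assuming no protection
 * keys: for an explicit user-mode write, access = PFERR_USER_MASK |
 * PFERR_WRITE_MASK = 0x6, not_smap is false and index = 0x6 >> 1 = 3;
 * for an explicit supervisor read with EFLAGS.AC = 1, access = 0,
 * not_smap is true and index = (0 + (1 << PFERR_RSVD_BIT)) >> 1 = 4.
 * The fault decision is then bit 'pte_access' of mmu->permissions[index].
 */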
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
#else
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
#endif

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}

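/*
 * Example: a slot with base_gfn = 0x100 and npages = 0x800 spans gfns
 * 0x100..0x8ff.  At PG_LEVEL_2M (KVM_HPAGE_GFN_SHIFT == 9) the first and
 * last gfns fall in 2MB regions 0 and 4, so kvm_mmu_slot_lpages() reports
 * 5 large-page entries even though the slot is only 8MiB: the unaligned
 * base makes it straddle an extra 2MB region.
 */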
static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}
#endif