/* [linux-2.6-block.git] arch/x86/kvm/kvm_cache_regs.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

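/*
 * CR0/CR4 bits that the guest may be allowed to own, i.e. bits it can
 * change without triggering a VM exit.  Reads of such bits must be
 * refreshed from hardware (see kvm_read_cr0_bits()/kvm_read_cr4_bits()
 * below) instead of being served from the cached vcpu->arch values.
 */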
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)

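/*
 * General-purpose registers are cached lazily: a bit set in regs_avail
 * means the value in vcpu->arch.regs[] is current, and a bit set in
 * regs_dirty means the cached value still has to be propagated back to
 * hardware state before the next guest entry.  Reading a register that is
 * not available asks the vendor module to fetch it via
 * kvm_x86_ops->cache_reg().
 */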
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

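/*
 * The PDPTRs (PAE page-directory-pointer entries) are cached from the MMU
 * used for guest page-table walks.  The might_sleep() reflects the "on svm"
 * note below: on SVM, refilling the cache may have to read the entries from
 * guest memory, which can sleep.
 */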
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

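/*
 * Read CR0/CR4 under a mask of the bits the caller is interested in.  If
 * any requested bit may be guest-owned, the current value is first
 * decached from hardware; everything else is answered from the cached
 * vcpu->arch.cr0/cr4.
 */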
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

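/*
 * CR3 is cached like the general-purpose registers: VCPU_EXREG_CR3 in
 * regs_avail indicates whether vcpu->arch.cr3 is current, and
 * decache_cr3() refreshes it from hardware when it is not.
 */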
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

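/*
 * Combine the low 32 bits of RAX and RDX into the 64-bit value the guest
 * placed in EDX:EAX, e.g. when emulating WRMSR.  A minimal usage sketch
 * (hypothetical caller, not part of this header):
 *
 *	u64 data = kvm_read_edx_eax(vcpu);	// 64-bit operand from EDX:EAX
 */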
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

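/*
 * HF_GUEST_MASK in hflags tracks whether the vCPU is currently running a
 * nested (L2) guest.  When guest mode is left, a deferred EOI-exitmap
 * update is converted into a KVM_REQ_LOAD_EOI_EXITMAP request so that it
 * is applied before the next entry into the L1 guest.
 */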
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

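/* True if the vCPU is in System Management Mode, tracked via HF_SMM_MASK. */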
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif