KVM: x86: Add macro to ensure reserved cr4 bits checks stay in sync
author Sean Christopherson <sean.j.christopherson@intel.com>
Tue, 10 Dec 2019 22:44:16 +0000 (14:44 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 21 Jan 2020 12:58:16 +0000 (13:58 +0100)
Add a helper macro to generate the set of reserved cr4 bits for both
host and guest, so that a check added against guest capabilities is
also added against host capabilities, and vice versa.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
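
The idiom in the patch is a GNU C statement-expression macro that takes the
capability predicate (and its context argument) as macro parameters, so a
single body computes the reserved-bit mask for both the host and the guest
path. Below is a minimal standalone sketch of that pattern, not KVM code:
the names (host_has(), guest_has(), struct host_info, struct guest_vcpu,
FEAT_*/BIT_*) are hypothetical placeholders.

#include <stdint.h>
#include <stdio.h>

/* Two hypothetical feature words with different owners and layouts. */
struct host_info  { uint64_t caps; };
struct guest_vcpu { uint64_t cpuid_caps; };

#define FEAT_A 0
#define FEAT_B 1
#define BIT_A (UINT64_C(1) << 0)
#define BIT_B (UINT64_C(1) << 1)

/*
 * GNU statement expression, as in the patch: evaluates to the mask.
 * Because this is a macro, each caller can pass its own predicate and
 * context type, while the list of checks is written exactly once.
 */
#define __reserved_bits(__has, __ctx)                  \
({                                                     \
        uint64_t __bits = 0;                           \
        if (!__has(__ctx, FEAT_A))                     \
                __bits |= BIT_A;                       \
        if (!__has(__ctx, FEAT_B))                     \
                __bits |= BIT_B;                       \
        __bits;                                        \
})

static int host_has(struct host_info *h, int feat)
{
        return (h->caps >> feat) & 1;
}

static int guest_has(struct guest_vcpu *v, int feat)
{
        return (v->cpuid_caps >> feat) & 1;
}

int main(void)
{
        struct host_info h = { .caps = UINT64_C(1) << FEAT_A };
        struct guest_vcpu v = { .cpuid_caps = UINT64_C(1) << FEAT_B };

        /* Same check list, two different predicate/context pairs. */
        printf("host reserved:  %#llx\n",
               (unsigned long long)__reserved_bits(host_has, &h));
        printf("guest reserved: %#llx\n",
               (unsigned long long)__reserved_bits(guest_has, &v));
        return 0;
}

Compiled with gcc or clang (statement expressions are a GNU extension),
this prints 0x2 for the host (FEAT_B missing) and 0x1 for the guest
(FEAT_A missing). Adding a new check to the macro body updates both
callers at once, which is the "stay in sync" property the commit message
describes.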
arch/x86/kvm/x86.c

index 960b886e1e436090bff3589c1111535f22702c67..456fc131c95efc3b28d92e9f8a32e38f004e9921 100644 (file)
@@ -881,31 +881,36 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
+#define __cr4_reserved_bits(__cpu_has, __c)            \
+({                                                     \
+       u64 __reserved_bits = CR4_RESERVED_BITS;        \
+                                                       \
+       if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
+               __reserved_bits |= X86_CR4_OSXSAVE;     \
+       if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
+               __reserved_bits |= X86_CR4_SMEP;        \
+       if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
+               __reserved_bits |= X86_CR4_SMAP;        \
+       if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
+               __reserved_bits |= X86_CR4_FSGSBASE;    \
+       if (!__cpu_has(__c, X86_FEATURE_PKU))           \
+               __reserved_bits |= X86_CR4_PKE;         \
+       if (!__cpu_has(__c, X86_FEATURE_LA57))          \
+               __reserved_bits |= X86_CR4_LA57;        \
+       if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
+               __reserved_bits |= X86_CR4_UMIP;        \
+       __reserved_bits;                                \
+})
+
 static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
 {
-       u64 reserved_bits = CR4_RESERVED_BITS;
-
-       if (!cpu_has(c, X86_FEATURE_XSAVE))
-               reserved_bits |= X86_CR4_OSXSAVE;
-
-       if (!cpu_has(c, X86_FEATURE_SMEP))
-               reserved_bits |= X86_CR4_SMEP;
-
-       if (!cpu_has(c, X86_FEATURE_SMAP))
-               reserved_bits |= X86_CR4_SMAP;
-
-       if (!cpu_has(c, X86_FEATURE_FSGSBASE))
-               reserved_bits |= X86_CR4_FSGSBASE;
+       u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
 
-       if (!cpu_has(c, X86_FEATURE_PKU))
-               reserved_bits |= X86_CR4_PKE;
+       if (cpuid_ecx(0x7) & bit(X86_FEATURE_LA57))
+               reserved_bits &= ~X86_CR4_LA57;
 
-       if (!cpu_has(c, X86_FEATURE_LA57) &&
-           !(cpuid_ecx(0x7) & bit(X86_FEATURE_LA57)))
-               reserved_bits |= X86_CR4_LA57;
-
-       if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated())
-               reserved_bits |= X86_CR4_UMIP;
+       if (kvm_x86_ops->umip_emulated())
+               reserved_bits &= ~X86_CR4_UMIP;
 
        return reserved_bits;
 }
@@ -915,25 +920,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (cr4 & cr4_reserved_bits)
                return -EINVAL;
 
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-               return -EINVAL;
-
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-               return -EINVAL;
-
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-               return -EINVAL;
-
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-               return -EINVAL;
-
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-               return -EINVAL;
-
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-               return -EINVAL;
-
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+       if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
                return -EINVAL;
 
        return 0;